
Lines Matching +full:per +full:- +full:cpu (excerpts from the ia64 NUMA boot code, arch/ia64/mm/discontig.c)

1 // SPDX-License-Identifier: GPL-2.0
7 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
35 * Track per-node information needed to setup the boot memory allocator, the
36 * per-node areas, and the real VM.
52 * To prevent cache aliasing effects, align per-node structures so that they
57 ((((addr) + 1024*1024-1) & ~(1024*1024-1)) + \
58 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
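Taken together, the macro rounds the address up to a 1 MB boundary and then staggers each node's structures by node * PERCPU_PAGE_SIZE (wrapped at MAX_NODE_ALIGN_OFFSET), so identical per-node structures land at different cache colors. A minimal userspace sketch of the same arithmetic; the two constants here are illustrative stand-ins, not the kernel's configured values:

#include <stdio.h>

/* Illustrative stand-ins; the kernel derives these from its config. */
#define PERCPU_PAGE_SIZE      (64 * 1024)        /* assumed 64 KB  */
#define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024) /* assumed 32 MB  */

/* Same shape as the macro above: round up to 1 MB, then stagger by node. */
#define NODEDATA_ALIGN(addr, node)                                     \
	((((addr) + 1024 * 1024 - 1) & ~(1024 * 1024 - 1)) +           \
	 (((node) * PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

int main(void)
{
	unsigned long start = 0x12345678;

	for (int node = 0; node < 4; node++)
		printf("node %d: 0x%lx\n", node,
		       (unsigned long)NODEDATA_ALIGN(start, node));
	return 0;
}

Each node's result differs by PERCPU_PAGE_SIZE, which is exactly the cache-aliasing avoidance the comment describes.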
61 * build_node_maps - callback to setup mem_data structs for each node
69 * if necessary. Any non-existent pages will simply be part of the virtual
92 * early_nr_cpus_node - return number of cpus on a given node
97 * called yet. Note that node 0 will also count all non-existent cpus.
101 int cpu, n = 0; in early_nr_cpus_node() local
103 for_each_possible_early_cpu(cpu) in early_nr_cpus_node()
104 if (node == node_cpuid[cpu].nid) in early_nr_cpus_node()
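Only the matching lines of early_nr_cpus_node() appear above; the whole function is a straight count over the boot-time cpu-to-node table. A self-contained sketch with a mocked-up node_cpuid[] (the kernel's table is filled in from firmware at boot):

#include <stdio.h>

#define NR_CPUS 8

/* Mock of the kernel's boot-time cpu -> node table. */
static struct { int nid; } node_cpuid[NR_CPUS] = {
	{0}, {0}, {1}, {1}, {0}, {1}, {0}, {1},
};

/* Count cpus attached to @node, as early_nr_cpus_node() does. */
static int early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)  /* for_each_possible_early_cpu() */
		if (node == node_cpuid[cpu].nid)
			n++;
	return n;
}

int main(void)
{
	printf("node 0: %d cpus, node 1: %d cpus\n",
	       early_nr_cpus_node(0), early_nr_cpus_node(1));
	return 0;
}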
111 * compute_pernodesize - compute size of pernode data
129 * per_cpu_node_setup - setup per-cpu areas on each node
130 * @cpu_data: per-cpu area on this node
133 * Copy the static per-cpu data into the region we just set aside and then
134 * setup __per_cpu_offset for each CPU on this node. Return a pointer to
140 int cpu; in per_cpu_node_setup() local
142 for_each_possible_early_cpu(cpu) { in per_cpu_node_setup()
143 void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start; in per_cpu_node_setup()
145 if (node != node_cpuid[cpu].nid) in per_cpu_node_setup()
148 memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start); in per_cpu_node_setup()
149 __per_cpu_offset[cpu] = (char *)__va(cpu_data) - in per_cpu_node_setup()
161 if (cpu == 0) in per_cpu_node_setup()
163 (unsigned long)cpu_data - in per_cpu_node_setup()
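The copy loop above is the heart of per-node per-cpu setup: every cpu gets a private copy of the static per-cpu template, and __per_cpu_offset[cpu] records the distance from the linked template to that copy. A userspace sketch of the same offset arithmetic; the region, template contents, and PERCPU_PAGE_SIZE are stand-ins, and the kernel works on physical addresses via __va() rather than malloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS          4
#define PERCPU_PAGE_SIZE 4096   /* stand-in for the kernel's per-cpu page */

static char template_area[64] = "static per-cpu template";
static long __per_cpu_offset[NR_CPUS];

int main(void)
{
	/* Stand-in for the per-node region set aside at boot. */
	char *region = malloc((size_t)NR_CPUS * PERCPU_PAGE_SIZE);

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		char *dst = region + (size_t)cpu * PERCPU_PAGE_SIZE;

		/* Each cpu gets a private copy of the static template... */
		memcpy(dst, template_area, sizeof(template_area));
		/* ...and records how far that copy sits from the template. */
		__per_cpu_offset[cpu] =
			(long)((uintptr_t)dst - (uintptr_t)template_area);
	}

	/* A per-cpu variable is then reached as template address + offset. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d sees \"%s\"\n", cpu,
		       (char *)((uintptr_t)template_area +
				(uintptr_t)__per_cpu_offset[cpu]));

	free(region);
	return 0;
}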
174 * setup_per_cpu_areas - setup percpu areas
188 unsigned int cpu; in setup_per_cpu_areas() local
195 cpu_map = ai->groups[0].cpu_map; in setup_per_cpu_areas()
199 for_each_possible_cpu(cpu) in setup_per_cpu_areas()
201 (void *)(__per_cpu_offset[cpu] + __per_cpu_start)); in setup_per_cpu_areas()
202 base_offset = (void *)__per_cpu_start - base; in setup_per_cpu_areas()
207 for_each_possible_cpu(cpu) in setup_per_cpu_areas()
208 if (node == node_cpuid[cpu].nid) in setup_per_cpu_areas()
209 cpu_map[unit++] = cpu; in setup_per_cpu_areas()
213 static_size = __per_cpu_end - __per_cpu_start; in setup_per_cpu_areas()
215 dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; in setup_per_cpu_areas()
220 ai->static_size = static_size; in setup_per_cpu_areas()
221 ai->reserved_size = reserved_size; in setup_per_cpu_areas()
222 ai->dyn_size = dyn_size; in setup_per_cpu_areas()
223 ai->unit_size = PERCPU_PAGE_SIZE; in setup_per_cpu_areas()
224 ai->atom_size = PAGE_SIZE; in setup_per_cpu_areas()
225 ai->alloc_size = PERCPU_PAGE_SIZE; in setup_per_cpu_areas()
231 prev_node = -1; in setup_per_cpu_areas()
232 ai->nr_groups = 0; in setup_per_cpu_areas()
234 cpu = cpu_map[unit]; in setup_per_cpu_areas()
235 node = node_cpuid[cpu].nid; in setup_per_cpu_areas()
238 gi->nr_units++; in setup_per_cpu_areas()
243 gi = &ai->groups[ai->nr_groups++]; in setup_per_cpu_areas()
244 gi->nr_units = 1; in setup_per_cpu_areas()
245 gi->base_offset = __per_cpu_offset[cpu] + base_offset; in setup_per_cpu_areas()
246 gi->cpu_map = &cpu_map[unit]; in setup_per_cpu_areas()
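The group-building walk above relies on cpu_map[] already being sorted by node (the earlier loop appends cpus node by node), so a group boundary is simply a change of nid between consecutive units. A compact sketch of that two-pass construction over a mock cpu -> node table:

#include <stdio.h>

#define NR_CPUS 6

struct group {
	int nr_units;
	int *cpu_map;
};

static int node_of[NR_CPUS] = {0, 1, 0, 1, 2, 0};  /* mock nid table */

int main(void)
{
	int cpu_map[NR_CPUS], unit = 0, nr_groups = 0, prev_node = -1;
	struct group groups[NR_CPUS], *gi = NULL;

	/* Pass 1: gather cpus node by node so each node is contiguous. */
	for (int node = 0; node < 3; node++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (node_of[cpu] == node)
				cpu_map[unit++] = cpu;

	/* Pass 2: one group per run of units that share a node. */
	for (unit = 0; unit < NR_CPUS; unit++) {
		int node = node_of[cpu_map[unit]];

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;
		gi = &groups[nr_groups++];
		gi->nr_units = 1;
		gi->cpu_map = &cpu_map[unit];
	}

	for (int g = 0; g < nr_groups; g++) {
		printf("group %d:", g);
		for (int u = 0; u < groups[g].nr_units; u++)
			printf(" cpu%d", groups[g].cpu_map[u]);
		printf("\n");
	}
	return 0;
}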
258 * fill_pernode - initialize pernode data.
290 * find_pernode_space - allocate memory for memory map and per-node structures
295 * This routine reserves space for the per-cpu data struct, the list of
296 * pg_data_ts and the per-node data struct. Each node will have something like
301  *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
302  *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
303  *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
304  *   |------------------------|
305  *   |   local pg_data_t *    |
306  *   |------------------------|
307  *   |  local ia64_node_data  |
308  *   |------------------------|
313 * could probably move the allocation of the per-cpu and ia64_node_data space
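The diagram translates into sizing arithmetic: roughly cpus_on_node * PERCPU_PAGE_SIZE plus an aligned pg_data_t and node-data struct, which is what compute_pernodesize() (whose body does not appear in the matches above) has to add up. A hedged sketch of that kind of sizing; every constant and struct size below is an illustrative assumption, not the kernel's:

#include <stdio.h>

#define PERCPU_PAGE_SIZE 65536  /* assumed per-cpu page size */
#define L1_CACHE_BYTES   128    /* assumed cacheline size    */

/* Round @size up to a cacheline, mirroring how each piece is aligned. */
static unsigned long cachealign(unsigned long size)
{
	return (size + L1_CACHE_BYTES - 1) & ~(unsigned long)(L1_CACHE_BYTES - 1);
}

static unsigned long pernodesize(int cpus_on_node)
{
	unsigned long size = 0;

	size += (unsigned long)cpus_on_node * PERCPU_PAGE_SIZE; /* per-cpu areas */
	size += cachealign(4096); /* local pg_data_t, size assumed      */
	size += cachealign(512);  /* local ia64_node_data, size assumed */
	return size;
}

int main(void)
{
	printf("a 2-cpu node needs %lu bytes of pernode space\n",
	       pernodesize(2));
	return 0;
}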
352 * reserve_pernode_space - reserve memory for per-node space
354 * Reserve the space used by the bootmem maps & per-node space in the boot
367 /* Now the per-node space */ in reserve_pernode_space()
381 * node_online_map is not set for hot-added nodes at this time, in scatter_node_data()
389 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; in scatter_node_data()
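scatter_node_data() re-broadcasts the global pg_data_t pointer list into each node's local copy, so node-local code can follow pgdat_list without a remote memory access. A sketch of that broadcast over mock per-node arrays (the types and sizes are stand-ins):

#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES 3

static void *pgdat_list[MAX_NUMNODES];                 /* global copy     */
static void *pg_data_ptrs[MAX_NUMNODES][MAX_NUMNODES]; /* per-node copies */

int main(void)
{
	static int dummy_pgdat[MAX_NUMNODES];  /* stand-ins for pg_data_t */

	for (int n = 0; n < MAX_NUMNODES; n++)
		pgdat_list[n] = &dummy_pgdat[n];

	/* Copy the global list into each node's local array. */
	for (int node = 0; node < MAX_NUMNODES; node++)
		memcpy(pg_data_ptrs[node], pgdat_list, sizeof(pgdat_list));

	printf("node 2's local pointer to node 0's pg_data: %p (global %p)\n",
	       pg_data_ptrs[2][0], pgdat_list[0]);
	return 0;
}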
396 * initialize_pernode_data - fixup per-cpu & per-node pointers
398 * Each node's per-node area has a copy of the global pg_data_t list, so
399 * we copy that to each node here, as well as setting the per-cpu pointer
400 * to the local node data structure. The active_cpus field of the per-node
405 int cpu, node; in initialize_pernode_data() local
410 /* Set the node_data pointer for each per-cpu struct */ in initialize_pernode_data()
411 for_each_possible_early_cpu(cpu) { in initialize_pernode_data()
412 node = node_cpuid[cpu].nid; in initialize_pernode_data()
413 per_cpu(ia64_cpu_info, cpu).node_data = in initialize_pernode_data()
419 cpu = 0; in initialize_pernode_data()
420 node = node_cpuid[cpu].nid; in initialize_pernode_data()
422 ((char *)&ia64_cpu_info - __per_cpu_start)); in initialize_pernode_data()
423 cpu0_cpu_info->node_data = mem_data[node].node_data; in initialize_pernode_data()
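The cpu 0 fixup above is plain pointer arithmetic: the address of a given cpu's copy of a per-cpu variable is that cpu's area base (per-cpu offset) plus the variable's position inside the static template. A small demonstration of the same computation with mocked areas; the variable's offset of 16 is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mock static per-cpu template with one variable inside it. */
static _Alignas(int) char __per_cpu_start[64];
#define ia64_cpu_info (*(int *)(__per_cpu_start + 16)) /* offset assumed */

int main(void)
{
	static _Alignas(int) char cpu0_area[64];  /* cpu 0's per-cpu copy */
	uintptr_t offset = (uintptr_t)cpu0_area - (uintptr_t)__per_cpu_start;

	/* cpu 0's copy of the variable = per-cpu offset + the variable's
	 * template-relative position, just like the fixup above. */
	int *cpu0_cpu_info = (int *)((uintptr_t)__per_cpu_start + offset +
			(uintptr_t)((char *)&ia64_cpu_info - __per_cpu_start));

	*cpu0_cpu_info = 42;
	printf("cpu0's copy of ia64_cpu_info holds %d\n", *cpu0_cpu_info);
	return 0;
}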
429  * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
439 int bestnode = -1, node, anynode = 0; in memory_less_node_alloc()
451 if (bestnode == -1) in memory_less_node_alloc()
461 * memory_less_nodes - allocate and initialize CPU only nodes pernode
480 * find_memory - walk the EFI memory map and setup the bootmem allocator
483 * allocate the per-cpu and per-node structures.
498 min_low_pfn = -1; in find_memory()
521 * per_cpu_init - setup per-cpu variables
528 int cpu; in per_cpu_init() local
533 for_each_possible_early_cpu(cpu) in per_cpu_init()
534 per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; in per_cpu_init()
542 * call_pernode_memory - use SRAT to call callback functions with node info
570 (*func)(start, end - start, 0); in call_pernode_memory()
580 (*func)(rs, re - rs, node_memblk[i].nid); in call_pernode_memory()
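The loop above is interval intersection: each EFI memory range [start, end) is clipped against every SRAT memory block, and the callback fires once per non-empty overlap, tagged with that block's node. A standalone sketch of the clipping; the memblk table here is made up, where the kernel's node_memblk[] comes from ACPI:

#include <stdio.h>

struct memblk { unsigned long start, end; int nid; };

/* Mock SRAT-derived table of node memory blocks. */
static struct memblk node_memblk[] = {
	{ 0x0000, 0x8000,  0 },
	{ 0x8000, 0x10000, 1 },
};

static void func(unsigned long start, unsigned long len, int nid)
{
	printf("  [%#lx, %#lx) -> node %d\n", start, start + len, nid);
}

/* Clip [start, end) against each memblk, calling func per overlap. */
static void call_pernode_memory(unsigned long start, unsigned long end)
{
	for (size_t i = 0; i < sizeof(node_memblk) / sizeof(node_memblk[0]); i++) {
		unsigned long rs = start > node_memblk[i].start ?
				   start : node_memblk[i].start;
		unsigned long re = end < node_memblk[i].end ?
				   end : node_memblk[i].end;

		if (rs < re)
			func(rs, re - rs, node_memblk[i].nid);
	}
}

int main(void)
{
	printf("range [0x4000, 0xc000):\n");
	call_pernode_memory(0x4000, 0xc000);
	return 0;
}

The range straddling both blocks is split into two callbacks, one per node, which is how the bootmem setup learns which node owns each piece of an EFI range.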
588 * paging_init - setup page tables
607 VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * in paging_init()
618 NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; in paging_init()