Lines Matching full:node
35 * Track per-node information needed to set up the boot memory allocator, the
36 * per-node areas, and the real VM.
52 * To prevent cache aliasing effects, align per-node structures so that they
53 * start at addresses that are strided by node number.
56 #define NODEDATA_ALIGN(addr, node) \ argument
58 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
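The stride in the matched line above is what keeps different nodes' per-node structures out of the same cache sets. Below is a small standalone sketch of that arithmetic; the PERCPU_PAGE_SIZE and MAX_NODE_ALIGN_OFFSET values are illustrative placeholders rather than the kernel's, and the base-address rounding that precedes the stride (not shown in the listing) is left out.

    /* Sketch of the per-node stride (illustrative constants). */
    #include <stdio.h>

    #define PERCPU_PAGE_SIZE      (64 * 1024)        /* assumed for the example */
    #define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024) /* assumed wrap-around cap */

    /* Offset added per node so per-node data on different nodes starts at
     * differently colored addresses. */
    static unsigned long node_stride(int node)
    {
        return ((unsigned long)node * PERCPU_PAGE_SIZE) &
               (MAX_NODE_ALIGN_OFFSET - 1);
    }

    int main(void)
    {
        for (int node = 0; node < 4; node++)
            printf("node %d: stride 0x%lx\n", node, node_stride(node));
        return 0;
    }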
61 * build_node_maps - callback to set up mem_data structs for each node
64 * @node: node where this range resides
67 * treat as a virtually contiguous block (i.e. each node). Each such block
73 int node) in build_node_maps() argument
80 if (!mem_data[node].min_pfn) { in build_node_maps()
81 mem_data[node].min_pfn = spfn; in build_node_maps()
82 mem_data[node].max_pfn = epfn; in build_node_maps()
84 mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn); in build_node_maps()
85 mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn); in build_node_maps()
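The min_pfn/max_pfn bookkeeping above can be modelled in a few lines. This is only a sketch: struct node_range, NR_NODES and note_range() are invented names standing in for mem_data[] and the real callback.

    /* Sketch of per-node pfn range tracking (types and limits are illustrative). */
    #define NR_NODES 8

    struct node_range {
        unsigned long min_pfn;   /* first page frame seen on this node */
        unsigned long max_pfn;   /* end of the highest range seen      */
    };

    static struct node_range mem_data[NR_NODES];

    /* Fold one physical range [spfn, epfn) into the node's running min/max. */
    static void note_range(int node, unsigned long spfn, unsigned long epfn)
    {
        struct node_range *r = &mem_data[node];

        if (!r->min_pfn) {              /* first range seen on this node */
            r->min_pfn = spfn;
            r->max_pfn = epfn;
        } else {
            r->min_pfn = spfn < r->min_pfn ? spfn : r->min_pfn;
            r->max_pfn = epfn > r->max_pfn ? epfn : r->max_pfn;
        }
    }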
92 * early_nr_cpus_node - return number of cpus on a given node
93 * @node: node to check
95 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
97 * called yet. Note that node 0 will also count all non-existent cpus.
99 static int __meminit early_nr_cpus_node(int node) in early_nr_cpus_node() argument
104 if (node == node_cpuid[cpu].nid) in early_nr_cpus_node()
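The count is a linear scan of the boot-time cpu-to-node table, since the usual node-to-cpu mask is not built this early. A hedged sketch, with node_cpuid[] reduced to a bare nid field and NR_CPUS picked arbitrarily:

    /* Sketch: count CPUs whose boot-time table entry names this node. */
    #define NR_CPUS 16

    struct cpu_node_map { int nid; };          /* simplified node_cpuid[] entry */
    static struct cpu_node_map node_cpuid[NR_CPUS];

    static int count_cpus_on_node(int node)
    {
        int cpu, n = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (node_cpuid[cpu].nid == node)
                n++;
        /* unused table slots default to nid 0, so node 0 over-counts,
         * matching the note in the comment above */
        return n;
    }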
112 * @node: the node id.
114 static unsigned long __meminit compute_pernodesize(int node) in compute_pernodesize() argument
118 cpus = early_nr_cpus_node(node); in compute_pernodesize()
120 pernodesize += node * L1_CACHE_BYTES; in compute_pernodesize()
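The matched lines suggest the per-node size has a cpu-count-dependent component plus a node-indexed cache-line stagger. A sketch of that sizing; the per-cpu term and the omitted pieces (local pg_data_t, node data) are assumptions, not taken from the listing.

    /* Sketch of per-node area sizing (component sizes are placeholders). */
    #define L1_CACHE_BYTES   128
    #define PERCPU_PAGE_SIZE (64 * 1024)

    static unsigned long compute_size(int node, int cpus_on_node)
    {
        unsigned long size = 0;

        size += cpus_on_node * PERCPU_PAGE_SIZE; /* assumed per-cpu component */
        size += node * L1_CACHE_BYTES;           /* node-indexed stagger, as in
                                                    the matched line above    */
        /* ... plus space for the local pg_data_t and node data (not shown)  */
        return size;
    }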
129 * per_cpu_node_setup - setup per-cpu areas on each node
130 * @cpu_data: per-cpu area on this node
131 * @node: node to setup
134 * set up __per_cpu_offset for each CPU on this node. Return a pointer to
137 static void *per_cpu_node_setup(void *cpu_data, int node) in per_cpu_node_setup() argument
145 if (node != node_cpuid[cpu].nid) in per_cpu_node_setup()
156 * area for cpu0 is on the correct node and its in per_cpu_node_setup()
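The visible logic is: skip CPUs that live on other nodes, and hand each remaining CPU a slot of this node's per-cpu area. The sketch below models that; treating the per-cpu offset as a plain per-cpu address is a simplification, and PERCPU_PAGE_SIZE is again a placeholder.

    /* Sketch: give each CPU on this node a slot of the node's per-cpu area. */
    #define NR_CPUS          16
    #define PERCPU_PAGE_SIZE (64 * 1024)

    static int           cpu_to_nid[NR_CPUS];     /* boot-time cpu->node map      */
    static unsigned long per_cpu_offset[NR_CPUS]; /* simplified __per_cpu_offset  */

    static void *assign_percpu_slots(void *cpu_data, int node)
    {
        char *p = cpu_data;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (cpu_to_nid[cpu] != node)
                continue;                         /* CPU lives on another node    */
            per_cpu_offset[cpu] = (unsigned long)p; /* simplified: absolute addr  */
            p += PERCPU_PAGE_SIZE;                /* next CPU's slot on this node */
        }
        return p;                  /* first byte after this node's per-cpu slots */
    }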
190 int node, prev_node, unit, nr_units, rc; in setup_per_cpu_areas() local
204 /* build cpu_map, units are grouped by node */ in setup_per_cpu_areas()
206 for_each_node(node) in setup_per_cpu_areas()
208 if (node == node_cpuid[cpu].nid) in setup_per_cpu_areas()
228 * CPUs are put into groups according to node. Walk cpu_map in setup_per_cpu_areas()
229 * and create new groups at node boundaries. in setup_per_cpu_areas()
235 node = node_cpuid[cpu].nid; in setup_per_cpu_areas()
237 if (node == prev_node) { in setup_per_cpu_areas()
241 prev_node = node; in setup_per_cpu_areas()
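Grouping works by walking a cpu_map that is already sorted by node and starting a new group whenever the node id changes. A sketch of that boundary detection, with cpu_to_nid[] and group_size[] as invented names; node ids are assumed non-negative.

    /* Sketch: split a node-sorted cpu_map into groups at node boundaries. */
    #define NR_CPUS 16

    static int cpu_to_nid[NR_CPUS];

    /* cpu_map[] is assumed sorted so CPUs of one node are adjacent.
     * group_size[g] receives the CPU count of group g; returns the group count. */
    static int split_groups(const int *cpu_map, int nr, int *group_size)
    {
        int groups = 0, prev_node = -1;

        for (int i = 0; i < nr; i++) {
            int node = cpu_to_nid[cpu_map[i]];

            if (node == prev_node) {
                group_size[groups - 1]++;    /* same node: grow current group   */
            } else {
                group_size[groups++] = 1;    /* node changed: start a new group */
                prev_node = node;
            }
        }
        return groups;
    }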
259 * @node: the node id.
263 static void __init fill_pernode(int node, unsigned long pernode, in fill_pernode() argument
267 int cpus = early_nr_cpus_node(node); in fill_pernode()
269 mem_data[node].pernode_addr = pernode; in fill_pernode()
270 mem_data[node].pernode_size = pernodesize; in fill_pernode()
275 pernode += node * L1_CACHE_BYTES; in fill_pernode()
277 pgdat_list[node] = __va(pernode); in fill_pernode()
280 mem_data[node].node_data = __va(pernode); in fill_pernode()
284 cpu_data = per_cpu_node_setup(cpu_data, node); in fill_pernode()
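Taken together, the matched lines lay the per-node area out as: record its address and size, stagger it by node, then place the local pg_data_t and the per-node data struct. A sketch with placeholder sizes; the exact alignment of each piece and the position of the per-cpu slots are not shown in the listing and are only assumed here.

    /* Sketch of the per-node area bookkeeping (sizes are placeholders). */
    #define L1_CACHE_BYTES 128

    struct pernode_layout {
        unsigned long addr, size;         /* the whole per-node area    */
        unsigned long pgdat;              /* local pg_data_t copy       */
        unsigned long node_data;          /* local per-node data struct */
    };

    static void fill_layout(struct pernode_layout *l, int node,
                            unsigned long pernode, unsigned long pernodesize,
                            unsigned long pgdat_size, unsigned long ndata_size)
    {
        l->addr = pernode;
        l->size = pernodesize;

        pernode += node * L1_CACHE_BYTES; /* per-node stagger, as matched above */
        l->pgdat = pernode;               /* local copy of the pg_data_t        */
        pernode += pgdat_size;
        l->node_data = pernode;           /* local per-node data struct         */
        pernode += ndata_size;
        /* the per-cpu slots are handed out from a separately tracked cursor,
         * which the matched lines do not show */
    }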
290 * find_pernode_space - allocate memory for memory map and per-node structures
293 * @node: node where this range resides
296 * pg_data_ts and the per-node data struct. Each node will have something like
301 * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
303 * | cpus_on_this_node | Node 0 will also have entries for all non-existent cpus.
318 int node) in find_pernode_space() argument
327 * Make sure this memory falls within this node's usable memory in find_pernode_space()
330 if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn) in find_pernode_space()
333 /* Don't setup this node's local space twice... */ in find_pernode_space()
334 if (mem_data[node].pernode_addr) in find_pernode_space()
341 pernodesize = compute_pernodesize(node); in find_pernode_space()
342 pernode = NODEDATA_ALIGN(start, node); in find_pernode_space()
346 fill_pernode(node, pernode, pernodesize); in find_pernode_space()
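The flow implied above: ignore ranges that fall outside the node's pfn window, ignore nodes whose area is already placed, otherwise compute the size, stride the start, and fill. A sketch with invented struct and helper names; the stride constants are placeholders as before.

    /* Sketch of the placement decision for one candidate range. */
    #define PERCPU_PAGE_SIZE      (64 * 1024)
    #define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024)

    struct node_info {
        unsigned long min_pfn, max_pfn;    /* usable pfn window for the node  */
        unsigned long pernode_addr;        /* nonzero once the area is placed */
        unsigned long pernode_size;
    };

    static int place_pernode(struct node_info *ni, int node,
                             unsigned long spfn, unsigned long epfn,
                             unsigned long start, unsigned long pernodesize)
    {
        if (spfn < ni->min_pfn || epfn > ni->max_pfn)
            return 0;                      /* range not wholly on this node   */
        if (ni->pernode_addr)
            return 0;                      /* this node was already set up    */

        /* stride the start as in the NODEDATA_ALIGN line, then claim it */
        ni->pernode_addr = start + (((unsigned long)node * PERCPU_PAGE_SIZE) &
                                    (MAX_NODE_ALIGN_OFFSET - 1));
        ni->pernode_size = pernodesize;    /* caller computed the node's size */
        return 1;                          /* placed; caller fills the area   */
    }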
352 * reserve_pernode_space - reserve memory for per-node space
354 * Reserve the space used by the bootmem maps & per-node space in the boot
361 int node; in reserve_pernode_space() local
363 for_each_online_node(node) { in reserve_pernode_space()
364 if (node_isset(node, memory_less_mask)) in reserve_pernode_space()
367 /* Now the per-node space */ in reserve_pernode_space()
368 size = mem_data[node].pernode_size; in reserve_pernode_space()
369 base = __pa(mem_data[node].pernode_addr); in reserve_pernode_space()
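Reservation is a walk over the online nodes, skipping memory-less nodes whose area lives on a donor node. In this sketch the reservation call is abstracted as a function pointer rather than naming a specific boot-allocator API.

    /* Sketch: reserve each populated node's per-node area. */
    #define NR_NODES 8

    struct pernode_area { unsigned long base, size; };
    static struct pernode_area areas[NR_NODES];
    static int memoryless[NR_NODES];        /* 1 if the node owns no memory */

    /* 'reserve' stands in for the boot allocator's reservation call. */
    static void reserve_all(void (*reserve)(unsigned long base, unsigned long size))
    {
        for (int node = 0; node < NR_NODES; node++) {
            if (memoryless[node])
                continue;                   /* its area lives on a donor node */
            reserve(areas[node].base, areas[node].size);
        }
    }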
377 int node; in scatter_node_data() local
382 * because we are halfway through initialization of the new node's in scatter_node_data()
383 * structures. If for_each_online_node() is used, a new node's in scatter_node_data()
387 for_each_node(node) { in scatter_node_data()
388 if (pgdat_list[node]) { in scatter_node_data()
389 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; in scatter_node_data()
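Per the comment, the copy must use for_each_node() so that a node still being brought up also receives the table. A sketch of broadcasting the global pg_data_t pointer list into each node's local copy; LOCAL_DATA_ADDR() is replaced by a plain pointer table here.

    /* Sketch: push the authoritative pgdat pointer list to every node. */
    #include <string.h>

    #define NR_NODES 8
    struct pgdat;                                /* opaque for the sketch      */

    static struct pgdat *pgdat_list[NR_NODES];   /* global, authoritative list */

    /* local_copies[node] points at that node's private pointer table, or NULL
     * if the node has no local area yet. */
    static void scatter(struct pgdat **local_copies[NR_NODES])
    {
        for (int node = 0; node < NR_NODES; node++) {
            if (!local_copies[node])
                continue;                        /* node has no local area yet */
            memcpy(local_copies[node], pgdat_list, sizeof(pgdat_list));
        }
    }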
396 * initialize_pernode_data - fixup per-cpu & per-node pointers
398 * Each node's per-node area has a copy of the global pg_data_t list, so
399 * we copy that to each node here, as well as setting the per-cpu pointer
400 * to the local node data structure. The active_cpus field of the per-node
405 int cpu, node; in initialize_pernode_data() local
412 node = node_cpuid[cpu].nid; in initialize_pernode_data()
414 mem_data[node].node_data; in initialize_pernode_data()
420 node = node_cpuid[cpu].nid; in initialize_pernode_data()
423 cpu0_cpu_info->node_data = mem_data[node].node_data; in initialize_pernode_data()
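Wiring the per-cpu pointers amounts to: for each CPU, look up its node and point the CPU's node_data at that node's structure. A sketch with simplified types; the special handling of cpu0's early-boot cpu_info is only noted in a comment.

    /* Sketch: point every CPU at the data of its own node. */
    #define NR_CPUS  16
    #define NR_NODES 8

    struct node_data_s { int nid; };              /* placeholder node data */
    struct cpu_info    { struct node_data_s *node_data; };

    static struct node_data_s *node_data[NR_NODES];
    static struct cpu_info     cpu_info[NR_CPUS];
    static int                 cpu_to_nid[NR_CPUS];

    static void wire_up_cpus(void)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            int node = cpu_to_nid[cpu];
            cpu_info[cpu].node_data = node_data[node];
        }
        /* cpu0 is handled the same way here; in the listing it gets a special
         * case because its cpu_info still lives in the early boot area. */
    }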
430 * node but fall back to any other node when __alloc_bootmem_node fails
432 * @nid: node id
433 * @pernodesize: size of this node's pernode data
439 int bestnode = -1, node, anynode = 0; in memory_less_node_alloc() local
441 for_each_online_node(node) { in memory_less_node_alloc()
442 if (node_isset(node, memory_less_mask)) in memory_less_node_alloc()
444 else if (node_distance(nid, node) < best) { in memory_less_node_alloc()
445 best = node_distance(nid, node); in memory_less_node_alloc()
446 bestnode = node; in memory_less_node_alloc()
448 anynode = node; in memory_less_node_alloc()
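The donor search keeps two candidates: the nearest node that has memory (by node_distance()) and any node with memory as a fallback. A sketch with an explicit distance matrix standing in for node_distance(); the initial 255 bound mirrors the "worst possible distance" idea.

    /* Sketch: pick a donor node for a memory-less node's per-node area. */
    #define NR_NODES 8

    static int dist[NR_NODES][NR_NODES];     /* illustrative distance matrix */
    static int has_no_memory[NR_NODES];

    static int pick_donor(int nid)
    {
        int best = 255, bestnode = -1, anynode = 0;

        for (int node = 0; node < NR_NODES; node++) {
            if (has_no_memory[node])
                continue;                    /* cannot donate, has no memory */
            if (dist[nid][node] < best) {
                best = dist[nid][node];
                bestnode = node;
            }
            anynode = node;                  /* remember a fallback donor    */
        }
        return bestnode >= 0 ? bestnode : anynode;
    }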
468 int node; in memory_less_nodes() local
470 for_each_node_mask(node, memory_less_mask) { in memory_less_nodes()
471 pernodesize = compute_pernodesize(node); in memory_less_nodes()
472 pernode = memory_less_node_alloc(node, pernodesize); in memory_less_nodes()
473 fill_pernode(node, __pa(pernode), pernodesize); in memory_less_nodes()
483 * allocate the per-cpu and per-node structures.
487 int node; in find_memory() local
493 printk(KERN_ERR "node info missing!\n"); in find_memory()
506 for_each_online_node(node) in find_memory()
507 if (mem_data[node].min_pfn) in find_memory()
508 node_clear(node, memory_less_mask); in find_memory()
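After the pfn ranges are gathered, every node that recorded a min_pfn is removed from the memory-less set. A sketch with the node mask reduced to a plain bitmask:

    /* Sketch: classify nodes as memory-less or populated. */
    #define NR_NODES 8

    static unsigned long memoryless_mask;          /* bit n set => node n has no memory */
    static unsigned long node_min_pfn[NR_NODES];

    static void classify_nodes(void)
    {
        memoryless_mask = (1UL << NR_NODES) - 1;   /* assume all memory-less ...  */
        for (int node = 0; node < NR_NODES; node++)
            if (node_min_pfn[node])                /* ... until a range was seen  */
                memoryless_mask &= ~(1UL << node);
    }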
542 * call_pernode_memory - use SRAT to call callback functions with node info
548 * out to which node a block of memory belongs. Ignore memory that we cannot
568 /* No SRAT table, so assume one node (node 0) */ in call_pernode_memory()
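The dispatch idea: without an SRAT, every range is handed to the callback as node 0; with one, each range would be split per table entry. A sketch that keeps only that top-level branch (the SRAT walk itself is omitted):

    /* Sketch of the per-node memory dispatch. */
    typedef void (*pernode_cb)(unsigned long start, unsigned long len, int node);

    static int have_affinity_table;   /* stand-in for "an SRAT was parsed" */

    static void dispatch_range(unsigned long start, unsigned long len, pernode_cb cb)
    {
        if (!have_affinity_table) {
            cb(start, len, 0);        /* no SRAT: everything belongs to node 0 */
            return;
        }
        /* otherwise: intersect [start, start+len) with each affinity entry and
         * call cb() once per overlapping piece, tagged with that entry's node */
    }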
590 * paging_init() sets up the page tables for each node of the system and frees
598 int node; in paging_init() local
614 for_each_online_node(node) { in paging_init()
615 pfn_offset = mem_data[node].min_pfn; in paging_init()
618 NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; in paging_init()
620 if (mem_data[node].max_pfn > max_pfn) in paging_init()
621 max_pfn = mem_data[node].max_pfn; in paging_init()
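Because the whole machine shares one virtual memmap, each node's mem_map is just vmem_map plus that node's first pfn, and max_pfn is the running maximum across nodes. A sketch with struct page reduced to a stub:

    /* Sketch: point each node's mem_map into the shared virtual memmap. */
    #define NR_NODES 8

    struct page_stub { int flags; };               /* placeholder struct page    */

    static struct page_stub *vmem_map;             /* base of the virtual memmap */
    static struct page_stub *node_mem_map[NR_NODES];
    static unsigned long node_min_pfn[NR_NODES], node_max_pfn[NR_NODES];

    static unsigned long point_node_memmaps(void)
    {
        unsigned long max_pfn = 0;

        for (int node = 0; node < NR_NODES; node++) {
            node_mem_map[node] = vmem_map + node_min_pfn[node];
            if (node_max_pfn[node] > max_pfn)
                max_pfn = node_max_pfn[node];
        }
        return max_pfn;
    }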
655 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, in vmemmap_populate() argument
658 return vmemmap_populate_basepages(start, end, node); in vmemmap_populate()