
Searched full:nodes (Results 1 – 25 of 2967) sorted by relevance


/kernel/linux/linux-5.10/drivers/gpu/drm/selftests/
test-drm_mm.c
268 struct drm_mm_node nodes[2]; in igt_debug() local
271 /* Create a small drm_mm with a couple of nodes and a few holes, and in igt_debug()
277 memset(nodes, 0, sizeof(nodes)); in igt_debug()
278 nodes[0].start = 512; in igt_debug()
279 nodes[0].size = 1024; in igt_debug()
280 ret = drm_mm_reserve_node(&mm, &nodes[0]); in igt_debug()
283 nodes[0].start, nodes[0].size); in igt_debug()
287 nodes[1].size = 1024; in igt_debug()
288 nodes[1].start = 4096 - 512 - nodes[1].size; in igt_debug()
289 ret = drm_mm_reserve_node(&mm, &nodes[1]); in igt_debug()
[all …]
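The igt_debug() snippet above pre-places nodes at fixed offsets with drm_mm_reserve_node(). A minimal sketch of that pattern, assuming kernel-module context (the range and sizes mirror the selftest but are otherwise illustrative):

#include <drm/drm_mm.h>

/* Reserve two fixed-position nodes in a 4096-unit drm_mm, leaving
 * holes before, between and after them, as the selftest does. */
static int reserve_two_nodes(void)
{
	struct drm_mm mm;
	struct drm_mm_node nodes[2] = {};	/* must be zeroed before reserve */
	int ret;

	drm_mm_init(&mm, 0, 4096);

	nodes[0].start = 512;
	nodes[0].size = 1024;
	ret = drm_mm_reserve_node(&mm, &nodes[0]);	/* claims [512, 1536) */
	if (ret)
		goto out;

	nodes[1].size = 1024;
	nodes[1].start = 4096 - 512 - nodes[1].size;	/* claims [2560, 3584) */
	ret = drm_mm_reserve_node(&mm, &nodes[1]);
	if (ret == 0)
		drm_mm_remove_node(&nodes[1]);
	drm_mm_remove_node(&nodes[0]);
out:
	drm_mm_takedown(&mm);	/* requires all nodes to have been removed */
	return ret;
}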
/kernel/linux/linux-4.19/Documentation/admin-guide/mm/
numa_memory_policy.rst
19 which is an administrative mechanism for restricting the nodes from which
42 allocations across all nodes with "sufficient" memory, so as
166 an optional set of nodes. The mode determines the behavior of the
168 and the optional set of nodes can be viewed as the arguments to the
190 does not use the optional set of nodes.
192 It is an error for the set of nodes specified for this policy to
197 nodes specified by the policy. Memory will be allocated from
204 allocation fails, the kernel will search other nodes, in order
224 page granularity, across the nodes specified in the policy.
229 Interleave mode indexes the set of nodes specified by the
[all …]
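The interleave mode described here is installed from userspace with set_mempolicy(2); a small sketch, assuming nodes 0 and 1 exist (build against the libnuma headers):

#include <numaif.h>	/* set_mempolicy(), MPOL_INTERLEAVE */
#include <stdio.h>

int main(void)
{
	/* Bits 0 and 1 select nodes 0 and 1; maxnode tells the kernel
	 * how many bits of the mask to read. */
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8)) {
		perror("set_mempolicy");
		return 1;
	}
	puts("future allocations interleave across nodes 0 and 1");
	return 0;
}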
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
numa_memory_policy.rst
19 which is an administrative mechanism for restricting the nodes from which
42 allocations across all nodes with "sufficient" memory, so as
166 an optional set of nodes. The mode determines the behavior of the
168 and the optional set of nodes can be viewed as the arguments to the
190 does not use the optional set of nodes.
192 It is an error for the set of nodes specified for this policy to
197 nodes specified by the policy. Memory will be allocated from
204 allocation fails, the kernel will search other nodes, in order
224 page granularity, across the nodes specified in the policy.
229 Interleave mode indexes the set of nodes specified by the
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/selftests/
test-drm_mm.c
266 struct drm_mm_node nodes[2]; in igt_debug() local
269 /* Create a small drm_mm with a couple of nodes and a few holes, and in igt_debug()
275 memset(nodes, 0, sizeof(nodes)); in igt_debug()
276 nodes[0].start = 512; in igt_debug()
277 nodes[0].size = 1024; in igt_debug()
278 ret = drm_mm_reserve_node(&mm, &nodes[0]); in igt_debug()
281 nodes[0].start, nodes[0].size); in igt_debug()
285 nodes[1].size = 1024; in igt_debug()
286 nodes[1].start = 4096 - 512 - nodes[1].size; in igt_debug()
287 ret = drm_mm_reserve_node(&mm, &nodes[1]); in igt_debug()
[all …]
/kernel/linux/linux-4.19/Documentation/md/
md-cluster.txt
47 node may write to those sectors. This is used when a new node
52 Each node has to communicate with other nodes when starting or ending
61 Normally all nodes hold a concurrent-read lock on this device.
65 Messages can be broadcast to all nodes, and the sender waits for all
66 other nodes to acknowledge the message before proceeding. Only one
73 3.1.1 METADATA_UPDATED: informs other nodes that the metadata has
78 3.1.2 RESYNCING: informs other nodes that a resync is initiated or
85 3.1.3 NEWDISK: informs other nodes that a device is being added to
101 The DLM LVB is used to communicate within nodes of the cluster. There
112 acknowledged by all nodes in the cluster. The BAST of the resource
[all …]
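The rule in this excerpt (a sender broadcasts, then blocks until every other node acknowledges, with only one message in flight) can be sketched generically; this is not the md-cluster/DLM code, and send_to_node() is a hypothetical transport:

#include <pthread.h>

void send_to_node(int node);	/* hypothetical transport; peer acks each send */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_acked = PTHREAD_COND_INITIALIZER;
static int pending;		/* acks still outstanding */

void broadcast_and_wait(int nr_peers)
{
	pthread_mutex_lock(&lock);
	pending = nr_peers;
	for (int i = 0; i < nr_peers; i++)
		send_to_node(i);
	while (pending > 0)	/* proceed only once every peer has acked */
		pthread_cond_wait(&all_acked, &lock);
	pthread_mutex_unlock(&lock);
}

void ack_received(void)		/* called from the receive path */
{
	pthread_mutex_lock(&lock);
	if (--pending == 0)
		pthread_cond_signal(&all_acked);
	pthread_mutex_unlock(&lock);
}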
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/usb/
usb-device.txt
7 Four types of device-tree nodes are defined: "host-controller nodes"
8 representing USB host controllers, "device nodes" representing USB devices,
9 "interface nodes" representing USB interfaces and "combined nodes"
20 Required properties for device nodes:
30 Required properties for device nodes with interface nodes:
35 Required properties for interface nodes:
49 Required properties for combined nodes:
59 Required properties for hub nodes with device nodes:
64 Required properties for host-controller nodes with device nodes:
/kernel/linux/linux-4.19/Documentation/devicetree/bindings/usb/
usb-device.txt
7 Four types of device-tree nodes are defined: "host-controller nodes"
8 representing USB host controllers, "device nodes" representing USB devices,
9 "interface nodes" representing USB interfaces and "combined nodes"
20 Required properties for device nodes:
30 Required properties for device nodes with interface nodes:
35 Required properties for interface nodes:
49 Required properties for combined nodes:
59 Required properties for hub nodes with device nodes:
64 Required properties for host-controller nodes with device nodes:
/kernel/linux/linux-4.19/drivers/gpu/drm/amd/amdgpu/
amdgpu_vram_mgr.c
111 struct drm_mm_node *nodes = mem->mm_node; in amdgpu_vram_mgr_bo_visible_size() local
121 for (usage = 0; nodes && pages; pages -= nodes->size, nodes++) in amdgpu_vram_mgr_bo_visible_size()
122 usage += amdgpu_vram_mgr_vis_size(adev, nodes); in amdgpu_vram_mgr_bo_visible_size()
145 struct drm_mm_node *nodes; in amdgpu_vram_mgr_new() local
166 nodes = kvmalloc_array(num_nodes, sizeof(*nodes), in amdgpu_vram_mgr_new()
168 if (!nodes) in amdgpu_vram_mgr_new()
187 r = drm_mm_insert_node_in_range(mm, &nodes[i], in amdgpu_vram_mgr_new()
194 usage += nodes[i].size << PAGE_SHIFT; in amdgpu_vram_mgr_new()
195 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); in amdgpu_vram_mgr_new()
200 start = nodes[i].start + nodes[i].size; in amdgpu_vram_mgr_new()
[all …]
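amdgpu_vram_mgr_new() backs a single buffer with an array of drm_mm nodes so the VRAM need not be contiguous. A sketch of that allocate-and-place loop, assuming kernel context (parameters are illustrative; amdgpu itself stashes the array in mem->mm_node):

#include <drm/drm_mm.h>
#include <linux/slab.h>

static int alloc_scattered(struct drm_mm *mm, unsigned int num_nodes,
			   u64 node_size, u64 end)
{
	struct drm_mm_node *nodes;
	unsigned int i;
	int r;

	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < num_nodes; i++) {
		r = drm_mm_insert_node_in_range(mm, &nodes[i], node_size,
						0, 0, 0, end,
						DRM_MM_INSERT_BEST);
		if (r)
			goto error;
	}
	return 0;	/* caller keeps 'nodes' and frees it on release */

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	kvfree(nodes);
	return r;
}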
/kernel/linux/linux-4.19/Documentation/devicetree/bindings/arm/
topology.txt
16 The cpu nodes (bindings defined in [1]) represent the devices that
22 For instance in a system where CPUs support SMT, "cpu" nodes represent all
24 In systems where SMT is not supported "cpu" nodes represent all cores present
27 ARM topology bindings allow one to associate cpu nodes with hierarchical groups
29 tree nodes.
39 A topology description containing phandles to cpu nodes that are not compliant
48 nodes are listed.
64 The cpu-map node's child nodes can be:
66 - one or more cluster nodes
70 The cpu-map node can only contain three types of child nodes:
[all …]
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/cpu/
cpu-topology.txt
20 For instance in a system where CPUs support SMT, "cpu" nodes represent all
22 In systems where SMT is not supported "cpu" nodes represent all cores present
25 CPU topology bindings allow one to associate cpu nodes with hierarchical groups
27 tree nodes.
32 The cpu nodes, as per bindings defined in [4], represent the devices that
35 A topology description containing phandles to cpu nodes that are not compliant
44 nodes are listed.
60 The cpu-map node's child nodes can be:
62 - one or more cluster nodes or
63 - one or more socket nodes in a multi-socket system
[all …]
/kernel/linux/linux-4.19/fs/ubifs/
gc.c
26 * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
27 * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
28 * nodes to the journal, at which point the garbage-collected LEB is free to be
29 * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
31 * to be reused. Garbage collection will cause the number of dirty index nodes
45 * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
46 if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would
47 * have to waste large pieces of free space at the end of LEB B, because nodes
48 * from LEB A would not fit. And the worst situation is when all nodes are of
113 * data_nodes_cmp - compare 2 data nodes.
[all …]
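The waste argument in this comment block (large nodes leave unusable tails at the end of the destination LEB) is easy to quantify; a toy calculation, not UBIFS code:

/* Toy illustration: packing fixed-size nodes into a LEB wastes the
 * tail that one more node cannot fill, so larger nodes waste more. */
static unsigned int wasted_tail(unsigned int leb_size, unsigned int node_size)
{
	return leb_size % node_size;
}
/* e.g. 126976-byte LEBs: 4096-byte nodes waste 0 bytes per LEB,
 * 60000-byte nodes waste 6976. */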
/kernel/linux/linux-4.19/mm/
mempolicy.c
15 * interleave Allocate memory interleaved over a set of nodes,
22 * bind Only allocate memory on a specific set of nodes,
26 * the allocation to memory nodes instead
150 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
167 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_interleave() argument
169 if (nodes_empty(*nodes)) in mpol_new_interleave()
171 pol->v.nodes = *nodes; in mpol_new_interleave()
175 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_preferred() argument
177 if (!nodes) in mpol_new_preferred()
[all …]
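mempolicy.c dispatches policy setup through a per-mode table of create/rebind callbacks, as the two function-pointer lines above show. A generic sketch of that ops-table idiom, with made-up names and a stand-in nodemask type:

#include <stddef.h>

typedef unsigned long nodemask_t;	/* stand-in for the kernel type */

struct policy_ops {
	int  (*create)(void *pol, const nodemask_t *nodes);
	void (*rebind)(void *pol, const nodemask_t *nodes);
};

static int new_interleave(void *pol, const nodemask_t *nodes)
{
	if (*nodes == 0)	/* like nodes_empty(): reject an empty set */
		return -1;
	return 0;
}

static const struct policy_ops ops[] = {	/* one slot per policy mode */
	[0] = { .create = new_interleave },
};

int policy_create(unsigned int mode, void *pol, const nodemask_t *nodes)
{
	/* caller guarantees mode indexes a valid slot */
	return ops[mode].create ? ops[mode].create(pol, nodes) : 0;
}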
/kernel/linux/linux-5.10/mm/
mempolicy.c
15 * interleave Allocate memory interleaved over a set of nodes,
22 * bind Only allocate memory on a specific set of nodes,
26 * the allocation to memory nodes instead
176 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
177 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
193 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_interleave() argument
195 if (nodes_empty(*nodes)) in mpol_new_interleave()
197 pol->v.nodes = *nodes; in mpol_new_interleave()
201 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) in mpol_new_preferred() argument
203 if (!nodes) in mpol_new_preferred()
[all …]
/kernel/linux/linux-4.19/Documentation/vm/
numa.rst
49 abstractions called "nodes". Linux maps the nodes onto the physical cells
51 architectures. As with physical cells, software nodes may contain 0 or more
53 "closer" nodes--nodes that map to closer cells--will generally experience
64 the emulation of additional nodes. For NUMA emulation, linux will carve up
65 the existing nodes--or the system memory for non-NUMA platforms--into multiple
66 nodes. Each emulated node will manage a fraction of the underlying cells'
76 an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
81 Because some nodes contain multiple zones containing different types of
87 from the same node before using remote nodes which are ordered by NUMA distance.
94 nodes' zones in the selected zonelist looking for the first zone in the list
[all …]
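The nodes this document describes can be enumerated from userspace via libnuma; a small probe, assuming the library is installed (link with -lnuma):

#include <numa.h>
#include <stdio.h>

int main(void)
{
	if (numa_available() < 0) {
		puts("kernel reports no NUMA support");
		return 0;
	}
	for (int n = 0; n <= numa_max_node(); n++) {
		long long free_b;
		long long size_b = numa_node_size64(n, &free_b);
		printf("node %d: %lld bytes, %lld free\n", n, size_b, free_b);
	}
	return 0;
}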
/kernel/linux/linux-5.10/Documentation/vm/
numa.rst
49 abstractions called "nodes". Linux maps the nodes onto the physical cells
51 architectures. As with physical cells, software nodes may contain 0 or more
53 "closer" nodes--nodes that map to closer cells--will generally experience
64 the emulation of additional nodes. For NUMA emulation, linux will carve up
65 the existing nodes--or the system memory for non-NUMA platforms--into multiple
66 nodes. Each emulated node will manage a fraction of the underlying cells'
76 an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
81 Because some nodes contain multiple zones containing different types of
87 from the same node before using remote nodes which are ordered by NUMA distance.
94 nodes' zones in the selected zonelist looking for the first zone in the list
[all …]
/kernel/linux/linux-5.10/fs/ubifs/
gc.c
14 * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
15 * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
16 * nodes to the journal, at which point the garbage-collected LEB is free to be
17 * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
19 * to be reused. Garbage collection will cause the number of dirty index nodes
33 * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
34 if GC moves data from LEB A to LEB B and nodes in LEB A are large, GC would
35 * have to waste large pieces of free space at the end of LEB B, because nodes
36 * from LEB A would not fit. And the worst situation is when all nodes are of
97 * data_nodes_cmp - compare 2 data nodes.
[all …]
/kernel/linux/linux-5.10/Documentation/driver-api/md/
md-cluster.rst
54 node may write to those sectors. This is used when a new node
60 Each node has to communicate with other nodes when starting or ending
70 Normally all nodes hold a concurrent-read lock on this device.
75 Messages can be broadcast to all nodes, and the sender waits for all
76 other nodes to acknowledge the message before proceeding. Only one
87 informs other nodes that the metadata has
94 informs other nodes that a resync is initiated or
104 informs other nodes that a device is being added to
128 The DLM LVB is used to communicate within nodes of the cluster. There
145 acknowledged by all nodes in the cluster. The BAST of the resource
[all …]
/kernel/linux/linux-5.10/Documentation/filesystems/
ubifs-authentication.rst
80 - *Index*: an on-flash B+ tree where the leaf nodes contain filesystem data
98 Basic on-flash UBIFS entities are called *nodes*. UBIFS knows different types
99 of nodes. Eg. data nodes (``struct ubifs_data_node``) which store chunks of file
100 contents or inode nodes (``struct ubifs_ino_node``) which represent VFS inodes.
101 Almost all types of nodes share a common header (``ubifs_ch``) containing basic
104 and some less important node types like padding nodes which are used to pad
108 as *wandering tree*, where only the changed nodes are re-written and previous
121 a dirty-flag which marks nodes that have to be persisted the next time the
126 on-flash filesystem structures like the index. On every commit, the TNC nodes
135 any changes (in form of inode nodes, data nodes etc.) between commits
[all …]
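Every on-flash UBIFS node starts with the common header (ubifs_ch) this text mentions; a simplified sketch of that shared-header idiom, with an illustrative field list rather than the exact on-flash layout:

#include <stdint.h>

struct node_header {		/* cf. struct ubifs_ch: first in every node */
	uint32_t magic;
	uint32_t crc;		/* covers the whole node */
	uint32_t len;		/* full node length, header included */
	uint8_t  node_type;	/* data node, inode node, padding node, ... */
};

struct data_node {		/* cf. struct ubifs_data_node */
	struct node_header ch;
	uint64_t key;		/* which inode/block this chunk belongs to */
	uint8_t  data[];	/* chunk of file contents */
};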
/kernel/linux/linux-4.19/Documentation/devicetree/bindings/pinctrl/
meson,pinctrl.txt
18 === GPIO sub-nodes ===
23 Required properties for sub-nodes are:
32 === Other sub-nodes ===
34 Child nodes without the "gpio-controller" represent some desired
35 configuration for a pin or a group. Those nodes can be pinmux nodes or
36 configuration nodes.
38 Required properties for pinmux nodes are:
45 Required properties for configuration nodes:
48 Configuration nodes support the generic properties "bias-disable",
/kernel/linux/linux-4.19/arch/ia64/include/asm/sn/
arch.h
21 * This is the maximum number of NUMALINK nodes that can be part of a single
22 * SSI kernel. This number includes C-bricks, M-bricks, and TIOs. Nodes in
24 * The number of compact nodes cannot exceed size of a coherency domain.
26 * all C/M/TIO nodes in an SSI system.
28 * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes.
30 * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
31 * to ACPI3.0, this limit will be removed. The notion of "compact nodes"
38 * Maximum number of nodes in all partitions and in all coherency domains.
39 * This is the total number of nodes accessible in the numalink fabric. It
/kernel/linux/linux-4.19/lib/
interval_tree_test.c
13 __param(int, nnodes, 100, "Number of nodes in the interval tree");
18 __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
23 static struct interval_tree_node *nodes = NULL; variable
48 nodes[i].start = a; in init()
49 nodes[i].last = b; in init()
67 nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), in interval_tree_test_init()
69 if (!nodes) in interval_tree_test_init()
74 kfree(nodes); in interval_tree_test_init()
87 interval_tree_insert(nodes + j, &root); in interval_tree_test_init()
89 interval_tree_remove(nodes + j, &root); in interval_tree_test_init()
[all …]
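The test module above exercises the generic interval tree; a sketch of the same insert/query API in kernel context (inclusive [start, last] ranges, as in the test):

#include <linux/interval_tree.h>
#include <linux/slab.h>

static bool range_is_busy(struct rb_root_cached *root,
			  unsigned long start, unsigned long last)
{
	return interval_tree_iter_first(root, start, last) != NULL;
}

static int track_range(struct rb_root_cached *root,
		       unsigned long start, unsigned long last)
{
	struct interval_tree_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;
	node->start = start;
	node->last = last;	/* inclusive end */
	interval_tree_insert(node, root);
	return 0;
}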
/kernel/linux/linux-5.10/lib/
interval_tree_test.c
14 __param(int, nnodes, 100, "Number of nodes in the interval tree");
19 __param(bool, search_all, false, "Searches will iterate all nodes in the tree");
24 static struct interval_tree_node *nodes = NULL; variable
49 nodes[i].start = a; in init()
50 nodes[i].last = b; in init()
68 nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), in interval_tree_test_init()
70 if (!nodes) in interval_tree_test_init()
75 kfree(nodes); in interval_tree_test_init()
88 interval_tree_insert(nodes + j, &root); in interval_tree_test_init()
90 interval_tree_remove(nodes + j, &root); in interval_tree_test_init()
[all …]
/kernel/linux/linux-5.10/fs/btrfs/
inode-item.c
93 return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], in btrfs_lookup_inode_extref()
135 extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], in btrfs_del_inode_extref()
143 leaf = path->nodes[0]; in btrfs_del_inode_extref()
207 ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name, in btrfs_del_inode_ref()
214 leaf = path->nodes[0]; in btrfs_del_inode_ref()
277 if (btrfs_find_name_in_ext_backref(path->nodes[0], in btrfs_insert_inode_extref()
289 leaf = path->nodes[0]; in btrfs_insert_inode_extref()
295 btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len); in btrfs_insert_inode_extref()
296 btrfs_set_inode_extref_index(path->nodes[0], extref, index); in btrfs_insert_inode_extref()
297 btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid); in btrfs_insert_inode_extref()
[all …]
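The path->nodes[0]/path->slots[0] pair used throughout this file is the result of a tree search: level 0 of the path is the leaf, and the slot indexes the item within it. A hedged sketch of that lookup pattern (kernel-tree context; btrfs internal headers assumed):

static int lookup_key(struct btrfs_root *root, struct btrfs_key *key)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);	/* read-only */
	if (ret == 0) {
		leaf = path->nodes[0];	/* the leaf the search landed on */
		/* read the item through leaf + path->slots[0] here */
	}
	btrfs_free_path(path);	/* also releases the held buffers */
	return ret;		/* 0 found, 1 not found, <0 error */
}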
/kernel/linux/linux-4.19/fs/btrfs/
inode-item.c
101 if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], in btrfs_lookup_inode_extref()
145 if (!btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0], in btrfs_del_inode_extref()
153 leaf = path->nodes[0]; in btrfs_del_inode_extref()
216 if (!btrfs_find_name_in_backref(path->nodes[0], path->slots[0], in btrfs_del_inode_ref()
222 leaf = path->nodes[0]; in btrfs_del_inode_ref()
285 if (btrfs_find_name_in_ext_backref(path->nodes[0], in btrfs_insert_inode_extref()
297 leaf = path->nodes[0]; in btrfs_insert_inode_extref()
303 btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len); in btrfs_insert_inode_extref()
304 btrfs_set_inode_extref_index(path->nodes[0], extref, index); in btrfs_insert_inode_extref()
305 btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid); in btrfs_insert_inode_extref()
[all …]
/kernel/linux/linux-4.19/drivers/net/ethernet/intel/ice/
ice_sched.c
70 /* Check if teid matches any of the child nodes */ in ice_sched_find_node_by_teid()
174 * ice_sched_remove_elems - remove nodes from hw
177 * @num_nodes: number of nodes
180 This function removes nodes from hw
273 * The parent array is updated below and that shifts the nodes in ice_free_sched_node()
279 /* Leaf, TC and root nodes can't be deleted by SW */ in ice_free_sched_node()
326 /* leaf nodes have no children */ in ice_free_sched_node()
487 * ice_sched_suspend_resume_elems - suspend or resume hw nodes
489 * @num_nodes: number of nodes
493 * This function suspends or resumes hw nodes
[all …]
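ice_sched_find_node_by_teid() walks the scheduler tree comparing TEIDs against each node's children. A generic sketch of that recursive search (the struct is illustrative, not the ice driver's layout):

struct sched_node {
	unsigned int teid;		/* tree element id */
	unsigned int num_children;
	struct sched_node **children;
};

struct sched_node *find_by_teid(struct sched_node *start, unsigned int teid)
{
	if (!start)
		return NULL;
	if (start->teid == teid)
		return start;
	for (unsigned int i = 0; i < start->num_children; i++) {
		struct sched_node *hit =
			find_by_teid(start->children[i], teid);
		if (hit)
			return hit;
	}
	return NULL;
}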
