Searched refs:shared (Results 1 – 4 of 4) sorted by relevance
/mm/
interval_tree.c
     24  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
     25          unsigned long, shared.rb_subtree_last,
  in vma_interval_tree_insert_after():
     39  if (!prev->shared.rb.rb_right) {
     41          link = &prev->shared.rb.rb_right;
     43          parent = rb_entry(prev->shared.rb.rb_right,
     44                  struct vm_area_struct, shared.rb);
     45  if (parent->shared.rb_subtree_last < last)
     46          parent->shared.rb_subtree_last = last;
     47  while (parent->shared.rb.rb_left) {
     48          parent = rb_entry(parent->shared.rb.rb_left,
  [all …]
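Context for the interval_tree.c hits: INTERVAL_TREE_DEFINE() (from include/linux/interval_tree_generic.h) instantiates an augmented rbtree over a caller-chosen interval, here using vma->shared.rb as the tree node and vma->shared.rb_subtree_last as the cached maximum interval end of each subtree; the open-coded loop in vma_interval_tree_insert_after() (lines 39-48) is keeping that same rb_subtree_last value correct for a fast-path insert. As a minimal sketch of how the macro is used, here is a hypothetical structure wired into the same generic machinery; struct my_node, my_start(), my_last() and the my_itree prefix are invented names, not anything in mm/:

/*
 * Sketch only, not code from mm/: a made-up structure plugged into the
 * same INTERVAL_TREE_DEFINE() machinery that mm/interval_tree.c uses
 * for struct vm_area_struct.
 */
#include <linux/interval_tree_generic.h>

struct my_node {
	struct rb_node rb;		/* plays the role of vma->shared.rb */
	unsigned long start, last;	/* closed interval [start, last] */
	unsigned long subtree_last;	/* like shared.rb_subtree_last */
};

static inline unsigned long my_start(struct my_node *n) { return n->start; }
static inline unsigned long my_last(struct my_node *n)  { return n->last; }

/*
 * Expands into my_itree_insert(), my_itree_remove(), my_itree_iter_first()
 * and my_itree_iter_next(), each of which keeps subtree_last up to date,
 * which is what the loop in vma_interval_tree_insert_after() does by hand.
 */
INTERVAL_TREE_DEFINE(struct my_node, rb, unsigned long, subtree_last,
		     my_start, my_last, static, my_itree)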
slab.c
  in kmem_cache_node_init():
    262  parent->shared = NULL;
  (no function context):
    454  .shared = 1,
  in __drain_alien_cache():
    920  if (n->shared)
    921          transfer_objects(n->shared, ac, ac->limit);
  in cpuup_canceled():
   1092  struct array_cache *shared;          (local)
   1117  shared = n->shared;
   1118  if (shared) {
   1119  free_block(cachep, shared->entry,
   1120          shared->avail, node, &list);
   1121  n->shared = NULL;
  [all …]
slab.h
    156  unsigned int shared;                                   (member)
    336  struct array_cache *shared;    /* shared per node */   (member)
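The slab.c and slab.h hits are two views of the same thing: the per-node structure (struct kmem_cache_node, the n in slab.c's n->shared) carries a struct array_cache *shared, a bounded array of free object pointers shared by the CPUs of that node. Object pointers move in and out of it in bulk with transfer_objects() (in the __drain_alien_cache() hit above, objects drained from an alien cache are parked in n->shared), and in the cpuup_canceled() path the shared cache is flushed back to the slabs with free_block() and n->shared is cleared. The following is a rough user-space sketch of just the bulk-transfer step, assuming a simplified array_cache with only the avail/limit/entry fields visible above; it is an illustration, not the kernel's transfer_objects():

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for slab's array_cache: a bounded stack of
 * object pointers.  The real structure uses a flexible array member. */
struct array_cache {
	unsigned int avail;	/* entries currently held */
	unsigned int limit;	/* capacity */
	void *entry[32];
};

/* Move up to 'max' pointers from 'from' into 'to', bounded by what
 * 'from' holds and by the free room in 'to' -- the shape of a bulk
 * move such as transfer_objects(n->shared, ac, ac->limit) above. */
static unsigned int transfer(struct array_cache *to,
			     struct array_cache *from, unsigned int max)
{
	unsigned int nr = from->avail;

	if (nr > max)
		nr = max;
	if (nr > to->limit - to->avail)
		nr = to->limit - to->avail;
	if (!nr)
		return 0;

	memcpy(&to->entry[to->avail], &from->entry[from->avail - nr],
	       nr * sizeof(void *));
	from->avail -= nr;
	to->avail += nr;
	return nr;
}

int main(void)
{
	static int objs[8];
	struct array_cache shared = { .avail = 8, .limit = 32 };
	struct array_cache cpu = { .avail = 0, .limit = 4 };

	for (unsigned int i = 0; i < shared.avail; i++)
		shared.entry[i] = &objs[i];

	unsigned int moved = transfer(&cpu, &shared, cpu.limit);

	printf("moved %u, shared.avail=%u, cpu.avail=%u\n",
	       moved, shared.avail, cpu.avail);
	return 0;
}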
slab_common.c
  in cache_show():
   1112  sinfo.limit, sinfo.batchcount, sinfo.shared);
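The slab_common.c hit is the reporting side of the same tunable: cache_show() prints each cache's limit, batchcount and shared values, which end up in the tunables columns of /proc/slabinfo. Purely as an illustration of that output shape (the numbers and exact field widths below are guesses, not taken from the kernel source):

#include <stdio.h>

/* Illustration only: roughly the tunables fragment that cache_show()
 * appends to a slabinfo line.  All values here are made up. */
int main(void)
{
	unsigned int limit = 120, batchcount = 60, shared = 8;

	printf(" : tunables %4u %4u %4u\n", limit, batchcount, shared);
	return 0;
}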