| /kernel/linux/linux-5.10/drivers/staging/android/ion/ |
| D | ion_system_heap.c |
|      19  #define NUM_ORDERS ARRAY_SIZE(orders)
|      24  static const unsigned int orders[] = {8, 4, 0};  [variable]
|      31  if (order == orders[i])  [in order_to_index()]
|      82  if (size < order_to_size(orders[i]))  [in alloc_largest_available()]
|      84  if (max_order < orders[i])  [in alloc_largest_available()]
|      87  page = alloc_buffer_page(heap, buffer, orders[i]);  [in alloc_largest_available()]
|     111  unsigned int max_order = orders[0];  [in ion_system_heap_allocate()]
|     232  if (orders[i] > 4)  [in ion_system_heap_create_pools()]
|     235  pool = ion_page_pool_create(gfp_flags, orders[i]);  [in ion_system_heap_create_pools()]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/ttm/tests/ |
| D | ttm_pool_test.c |
|     251  pt = &pool->caching[caching].orders[order];  [in ttm_pool_alloc_order_caching_match()]
|     280  pt_pool = &pool->caching[pool_caching].orders[order];  [in ttm_pool_alloc_caching_mismatch()]
|     281  pt_tt = &pool->caching[tt_caching].orders[order];  [in ttm_pool_alloc_caching_mismatch()]
|     314  pt_pool = &pool->caching[caching].orders[order];  [in ttm_pool_alloc_order_mismatch()]
|     315  pt_tt = &pool->caching[caching].orders[0];  [in ttm_pool_alloc_order_mismatch()]
|     355  pt = &pool->caching[caching].orders[order];  [in ttm_pool_free_dma_alloc()]
|     386  pt = &pool->caching[caching].orders[order];  [in ttm_pool_free_no_dma_alloc()]
|     406  pt = &pool->caching[caching].orders[order];  [in ttm_pool_fini_basic()]
|
| /kernel/linux/linux-6.6/drivers/dma-buf/heaps/ |
| D | system_heap.c |
|      49  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
|      54  static const unsigned int orders[] = {8, 4, 0};  [variable]
|      55  #define NUM_ORDERS ARRAY_SIZE(orders)
|     321  if (size < (PAGE_SIZE << orders[i]))  [in alloc_largest_available()]
|     323  if (max_order < orders[i])  [in alloc_largest_available()]
|     326  page = alloc_pages(order_flags[i], orders[i]);  [in alloc_largest_available()]
|     342  unsigned int max_order = orders[0];  [in system_heap_allocate()]
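Both system heaps listed here (this file and the ion_system_heap.c entry above) use the same largest-fit pattern in their alloc_largest_available() hits: try the biggest order first, then fall back. Below is a rough user-space sketch of that selection loop, assuming the 4K PAGE_SIZE implied by the 1MB/64K/4K comment; pick_largest_order() is an illustrative name, not a function from either driver.

    #include <stddef.h>

    #define PAGE_SIZE   4096UL                 /* assumed, per the 1MB/64K/4K comment */
    static const unsigned int orders[] = { 8, 4, 0 };
    #define NUM_ORDERS  (sizeof(orders) / sizeof(orders[0]))

    /*
     * Return the largest order that still fits in the remaining request and
     * does not exceed max_order; -1 means even order 0 does not fit.
     */
    static int pick_largest_order(size_t size, unsigned int max_order)
    {
        for (size_t i = 0; i < NUM_ORDERS; i++) {
            if (size < (PAGE_SIZE << orders[i]))
                continue;               /* request smaller than this chunk */
            if (max_order < orders[i])
                continue;               /* caller capped the order */
            return orders[i];
        }
        return -1;
    }

In the drivers the caller then allocates one chunk of the chosen order, subtracts its size from the request, and passes the order just used as the next max_order, so later iterations never revisit larger orders.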
|
| /kernel/linux/linux-6.6/tools/memory-model/Documentation/ |
| D | glossary.txt |
|      29  a special operation that includes a load and which orders that
|     117  Fully Ordered: An operation such as smp_mb() that orders all of
|     120  that orders all of its CPU's prior accesses, itself, and
|     167  a special operation that includes a store and which orders that
|
| D | cheatsheet.txt |
|      34  SELF: Orders self, as opposed to accesses before and/or after
|      35  SV: Orders later accesses to the same variable
|
| D | recipes.txt |
|     232  The smp_store_release() macro orders any prior accesses against the
|     233  store, while the smp_load_acquire macro orders the load against any
|     273  smp_store_release(), but the rcu_dereference() macro orders the load only
|     310  The smp_wmb() macro orders prior stores against later stores, and the
|     311  smp_rmb() macro orders prior loads against later loads. Therefore, if
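The recipes.txt lines quoted here (and again in the 5.10 copy below) describe the release/acquire message-passing pattern. A minimal sketch of that pattern follows; WRITE_ONCE(), READ_ONCE(), smp_store_release() and smp_load_acquire() are the real kernel primitives, while struct msg and the publish()/try_consume() names are illustrative.

    struct msg {
        int payload;
        int ready;
    };

    /* Writer: store the payload, then set the flag with release semantics so
     * the payload store cannot be reordered after the flag store. */
    static void publish(struct msg *m, int v)
    {
        WRITE_ONCE(m->payload, v);
        smp_store_release(&m->ready, 1);
    }

    /* Reader: acquire-load the flag; a non-zero result guarantees the payload
     * written before the matching release store is visible. */
    static int try_consume(struct msg *m, int *out)
    {
        if (!smp_load_acquire(&m->ready))
            return 0;
        *out = READ_ONCE(m->payload);
        return 1;
    }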
|
| /kernel/linux/linux-5.10/tools/memory-model/Documentation/ |
| D | cheatsheet.txt |
|      34  SELF: Orders self, as opposed to accesses before and/or after
|      35  SV: Orders later accesses to the same variable
|
| D | recipes.txt |
|     232  The smp_store_release() macro orders any prior accesses against the
|     233  store, while the smp_load_acquire macro orders the load against any
|     273  smp_store_release(), but the rcu_dereference() macro orders the load only
|     310  The smp_wmb() macro orders prior stores against later stores, and the
|     311  smp_rmb() macro orders prior loads against later loads. Therefore, if
|
| /kernel/linux/linux-6.6/arch/s390/kvm/ |
| D | sigp.c |
|     266  /* handle unknown orders in user space */  [in __prepare_sigp_unknown()]
|     280  * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders  [in handle_sigp_dst()]
|     283  * interrupt, we need to return any new non-reset orders "busy".  [in handle_sigp_dst()]
|     298  * their orders, while the guest cannot observe a  [in handle_sigp_dst()]
|     299  * difference when issuing other orders from two  [in handle_sigp_dst()]
|
| /kernel/linux/linux-6.6/include/drm/ttm/ |
| D | ttm_pool.h |
|      61  * struct ttm_pool - Pool for all caching and orders
|      77  struct ttm_pool_type orders[MAX_ORDER + 1];  [member]
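This orders member is what the ttm_pool_test.c hits above keep indexing. A simplified sketch of the layout and the lookup, assuming the TTM_NUM_CACHING_TYPES dimension from ttm_caching.h; the surrounding fields of the real struct are omitted and pool_type() is an illustrative name.

    /* One ttm_pool_type free list per (caching mode, allocation order) pair. */
    struct ttm_pool {
        struct {
            struct ttm_pool_type orders[MAX_ORDER + 1];
        } caching[TTM_NUM_CACHING_TYPES];
    };

    /* Resolve the free list for one (caching, order) combination, the same
     * expression the kunit tests above evaluate. */
    static struct ttm_pool_type *pool_type(struct ttm_pool *pool,
                                           unsigned int caching,
                                           unsigned int order)
    {
        return &pool->caching[caching].orders[order];
    }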
|
| /kernel/linux/linux-5.10/arch/powerpc/mm/ |
| D | mmu_context.c |
|      45  * This full barrier orders the store to the cpumask above vs  [in switch_mm_irqs_off()]
|      57  * radix which orders earlier stores to clear the PTEs vs  [in switch_mm_irqs_off()]
|
| /kernel/linux/linux-5.10/arch/s390/kvm/ |
| D | sigp.c |
|     278  /* handle unknown orders in user space */  [in __prepare_sigp_unknown()]
|     292  * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders  [in handle_sigp_dst()]
|     295  * interrupt, we need to return any new non-reset orders "busy".  [in handle_sigp_dst()]
|     310  * their orders, while the guest cannot observe a  [in handle_sigp_dst()]
|     311  * difference when issuing other orders from two  [in handle_sigp_dst()]
|
| /kernel/linux/linux-5.10/Documentation/ |
| D | atomic_t.txt |
|     194  smp_mb__before_atomic() orders all earlier accesses against the RMW op
|     195  itself and all accesses following it, and smp_mb__after_atomic() orders all
|     226  a RELEASE because it orders preceding instructions against both the read
|
| /kernel/linux/linux-6.6/arch/powerpc/mm/ |
| D | mmu_context.c |
|      56  * This full barrier orders the store to the cpumask above vs  [in switch_mm_irqs_off()]
|      69  * radix which orders earlier stores to clear the PTEs before  [in switch_mm_irqs_off()]
|
| /kernel/linux/linux-6.6/Documentation/ |
| D | atomic_t.txt |
|     194  smp_mb__before_atomic() orders all earlier accesses against the RMW op
|     195  itself and all accesses following it, and smp_mb__after_atomic() orders all
|     226  a RELEASE because it orders preceding instructions against both the read
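These lines (mirrored in the 5.10 copy above) cover upgrading an otherwise unordered, non-value-returning RMW with barrier helpers. A small sketch of the usual idiom; obj, dead and refs are illustrative names, the barrier and atomic calls are real kernel APIs.

    obj->dead = 1;
    smp_mb__before_atomic();    /* orders the store to obj->dead before the RMW */
    atomic_dec(&obj->refs);
    smp_mb__after_atomic();     /* orders the RMW before everything that follows */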
|
| /kernel/linux/linux-5.10/Documentation/userspace-api/media/v4l/ |
| D | pixfmt-bayer.rst |
|      15  orders. See also `the Wikipedia article on Bayer filter
|
| D | field-order.rst |
|      80  If multiple field orders are possible the
|      81  driver must choose one of the possible field orders during
|
| /kernel/linux/linux-6.6/Documentation/userspace-api/media/v4l/ |
| D | pixfmt-bayer.rst |
|      15  orders. See also `the Wikipedia article on Bayer filter
|
| D | field-order.rst |
|      80  If multiple field orders are possible the
|      81  driver must choose one of the possible field orders during
|
| /kernel/linux/linux-5.10/include/linux/ |
| D | refcount.h |
|     177  * and thereby orders future stores. See the comment on top.
|     213  * and thereby orders future stores. See the comment on top.
|     239  * and thereby orders future stores. See the comment on top.
|
| /kernel/linux/linux-6.6/include/linux/ |
| D | refcount.h |
|     177  * and thereby orders future stores. See the comment on top.
|     213  * and thereby orders future stores. See the comment on top.
|     239  * and thereby orders future stores. See the comment on top.
|
| /kernel/linux/linux-5.10/samples/bpf/ |
| D | tc_l2_redirect_user.c |
|      59  /* bpf_tunnel_key.remote_ipv4 expects host byte orders */  [in main()]
|
| /kernel/linux/linux-6.6/samples/bpf/ |
| D | tc_l2_redirect_user.c |
|      59  /* bpf_tunnel_key.remote_ipv4 expects host byte orders */  [in main()]
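Both copies of this sample carry the same reminder: the address stored into bpf_tunnel_key.remote_ipv4 must be in host byte order, while the usual socket helpers produce network byte order. A hedged sketch of that conversion; the address literal and variable names are illustrative, not taken from the sample.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct in_addr vip4;

        /* inet_pton() fills vip4 in network byte order; ntohl() converts to
         * the host byte order that bpf_tunnel_key.remote_ipv4 expects. */
        inet_pton(AF_INET, "10.1.1.1", &vip4);
        uint32_t remote_ipv4 = ntohl(vip4.s_addr);

        printf("host-order value: 0x%08x\n", remote_ipv4);
        return 0;
    }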
|
| /kernel/linux/linux-6.6/drivers/android/ |
| D | binder_alloc_selftest.c |
|     204  /* Generate BUFFER_NUM factorial free orders. */
|     279  * then free them in all orders possible. Check that pages are
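The selftest (the 5.10 copy follows below) frees its buffers in every one of the BUFFER_NUM! possible orders. A standalone sketch of that enumeration; BUFFER_NUM's value and try_free_order() are illustrative stand-ins for the selftest's own helpers.

    #include <stdbool.h>
    #include <stdio.h>

    #define BUFFER_NUM 5    /* illustrative; 5! = 120 free orders */

    /* Stand-in for the selftest body: here it just prints the order. */
    static void try_free_order(const int order[])
    {
        for (int i = 0; i < BUFFER_NUM; i++)
            printf("%d ", order[i]);
        printf("\n");
    }

    /* Recursively build every permutation of buffer indices, i.e. every
     * possible order in which the buffers could be freed. */
    static void permute_free_orders(int order[], bool used[], int depth)
    {
        if (depth == BUFFER_NUM) {
            try_free_order(order);
            return;
        }
        for (int i = 0; i < BUFFER_NUM; i++) {
            if (used[i])
                continue;
            used[i] = true;
            order[depth] = i;
            permute_free_orders(order, used, depth + 1);
            used[i] = false;
        }
    }

    int main(void)
    {
        int order[BUFFER_NUM];
        bool used[BUFFER_NUM] = { false };

        permute_free_orders(order, used, 0);
        return 0;
    }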
|
| /kernel/linux/linux-5.10/drivers/android/ |
| D | binder_alloc_selftest.c |
|     204  /* Generate BUFFER_NUM factorial free orders. */
|     279  * then free them in all orders possible. Check that pages are
|