/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/ |
D | nv50.c |
    37    struct list_head lru; member
    52    struct list_head lru; member
    142   eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru); in nv50_instobj_kmap()
    148   list_del_init(&eobj->lru); in nv50_instobj_kmap()
    204   if (likely(iobj->lru.next) && iobj->map) { in nv50_instobj_release()
    205   BUG_ON(!list_empty(&iobj->lru)); in nv50_instobj_release()
    206   list_add_tail(&iobj->lru, &imem->lru); in nv50_instobj_release()
    245   if (likely(iobj->lru.next)) in nv50_instobj_acquire()
    246   list_del_init(&iobj->lru); in nv50_instobj_acquire()
    269   if (likely(iobj->lru.next)) { in nv50_instobj_boot()
    [all …]
|
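The nv50.c hits above show the bare intrusive-LRU discipline that recurs throughout this listing: the object embeds a list node, release parks it on the tail of a cache-wide list, acquire unlinks it, and the evictor takes the head. A minimal, self-contained userspace sketch of that discipline (all names are illustrative, not from nouveau; the list helpers are hand-rolled stand-ins for the kernel's <linux/list.h>):

/* Intrusive LRU: acquire unlinks, release appends, evict takes the head. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

/* container_of: recover the enclosing object from its embedded node */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct object {
	int id;
	struct list_head lru;	/* linked on the global LRU while idle */
};

static struct list_head lru_list;

static void object_release(struct object *o)
{
	/* most-recently-released objects go to the tail ... */
	list_add_tail(&o->lru, &lru_list);
}

static void object_acquire(struct object *o)
{
	/* ... and in-use objects are unlinked; cf. nv50_instobj_acquire(),
	 * which additionally tolerates a never-initialised node by
	 * checking lru.next for NULL first */
	if (!list_empty(&o->lru))
		list_del_init(&o->lru);
}

static struct object *evict_coldest(void)
{
	if (list_empty(&lru_list))
		return NULL;
	return container_of(lru_list.next, struct object, lru);
}

int main(void)
{
	struct object a = { .id = 1 }, b = { .id = 2 };

	list_init(&lru_list);
	list_init(&a.lru);
	list_init(&b.lru);

	object_release(&a);
	object_release(&b);
	object_acquire(&a);	/* a is busy again, b is now the coldest */

	printf("evict id=%d\n", evict_coldest()->id);	/* prints 2 */
	return 0;
}

Embedding the node in the object means eviction needs no auxiliary allocation, and unlinking is O(1) from anywhere in the list.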
/drivers/xen/ |
D | balloon.c |
    170   list_add_tail(&page->lru, &ballooned_pages); in balloon_append()
    173   list_add(&page->lru, &ballooned_pages); in balloon_append()
    187   page = list_entry(ballooned_pages.next, struct page, lru); in balloon_retrieve()
    190   list_del(&page->lru); in balloon_retrieve()
    203   struct list_head *next = page->lru.next; in balloon_next_page()
    206   return list_entry(next, struct page, lru); in balloon_next_page()
    411   page = list_first_entry_or_null(&ballooned_pages, struct page, lru); in increase_reservation()
    461   list_add(&page->lru, &pages); in decrease_reservation()
    478   list_for_each_entry_safe(page, tmp, &pages, lru) { in decrease_reservation()
    483   list_del(&page->lru); in decrease_reservation()
|
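The balloon driver shows why so many drivers in this listing can use page->lru as their own linkage: once a page belongs to the driver it sits on none of the memory-management lists, so the embedded node is free for reuse. A condensed sketch of the append/retrieve pair, mirroring (not reproducing) balloon.c; locking and the high/low-memory ordering are omitted:

/* Kernel-context sketch: ballooned pages strung on their own lru nodes. */
#include <linux/list.h>
#include <linux/mm.h>

static LIST_HEAD(demo_ballooned_pages);

static void demo_balloon_append(struct page *page)
{
	/* balloon.c additionally sorts highmem pages before lowmem ones */
	list_add_tail(&page->lru, &demo_ballooned_pages);
}

static struct page *demo_balloon_retrieve(void)
{
	struct page *page;

	page = list_first_entry_or_null(&demo_ballooned_pages,
					struct page, lru);
	if (page)
		list_del(&page->lru);	/* page leaves the balloon */
	return page;
}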
D | privcmd.c |
    94    list_for_each_entry_safe(p, n, pages, lru) in free_page_list()
    129   list_add_tail(&page->lru, pagelist); in gather_array()
    169   page = list_entry(pos, struct page, lru); in traverse_pages()
    203   page = list_entry(pos, struct page, lru); in traverse_pages_block()
    282   struct page, lru); in privcmd_ioctl_mmap()
|
D | grant-table.c |
    864   page = list_first_entry(&cache->pages, struct page, lru); in cache_deq()
    865   list_del(&page->lru); in cache_deq()
    872   list_add(&page->lru, &cache->pages); in cache_enq()
|
/drivers/md/ |
D | dm-writecache.c |
    84    struct list_head lru; member
    114   struct list_head lru; member
    670   list_add(&ins->lru, &wc->lru); in writecache_insert_entry()
    676   list_del(&e->lru); in writecache_unlink()
    696   list_add_tail(&e->lru, &wc->freelist); in writecache_add_to_freelist()
    736   e = container_of(wc->freelist.next, struct wc_entry, lru); in writecache_pop_from_freelist()
    739   list_del(&e->lru); in writecache_pop_from_freelist()
    775   wc->lru.next = LIST_POISON1; in writecache_poison_lists()
    776   wc->lru.prev = LIST_POISON2; in writecache_poison_lists()
    801   if (list_empty(&wc->lru)) in writecache_flush()
    [all …]
|
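dm-writecache moves one embedded node between two lists, wc->lru for live entries and wc->freelist for recyclable ones, popping from the freelist via container_of on the raw next pointer; on teardown it poisons the list heads so any stale traversal faults immediately. A reduced sketch, with wc_entry and the cache struct cut down to the fields involved:

/* Kernel-context sketch: one node, two lists, poisoned at teardown. */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/poison.h>

struct demo_entry {
	struct list_head lru;	/* on ->lru or ->freelist, never both */
};

struct demo_cache {
	struct list_head lru;
	struct list_head freelist;
};

static struct demo_entry *demo_pop_from_freelist(struct demo_cache *wc)
{
	struct demo_entry *e;

	if (list_empty(&wc->freelist))
		return NULL;
	/* container_of recovers the entry from its embedded node */
	e = container_of(wc->freelist.next, struct demo_entry, lru);
	list_del(&e->lru);
	return e;
}

static void demo_poison_lists(struct demo_cache *wc)
{
	/* any list walk after this point dereferences a poison value */
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
}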
D | dm-bufio.c |
    87    struct list_head lru[LIST_SIZE]; member
    512   list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer()
    549   list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru()
    808   list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer()
    820   list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer()
    958   list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async()
    1315  list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers()
    1623  list_for_each_entry(b, &c->lru[i], lru_list) { in drop_buffers()
    1641  BUG_ON(!list_empty(&c->lru[i])); in drop_buffers()
    1693  list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan()
    [all …]
|
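dm-bufio keeps an array of LRU lists indexed by buffer state (clean/dirty); buffers are linked at the head, so the reverse scans in the hits visit the oldest buffers first when hunting for an eviction victim or flushing dirty data. A reduced sketch of the relink and victim-scan idiom (the real code applies holder and predicate checks to each candidate rather than taking the first):

/* Kernel-context sketch: per-state LRU array, oldest found by reverse scan. */
#include <linux/list.h>

enum { DEMO_LIST_CLEAN, DEMO_LIST_DIRTY, DEMO_LIST_SIZE };

struct demo_buffer {
	struct list_head lru_list;
	int dirty;
};

struct demo_client {
	struct list_head lru[DEMO_LIST_SIZE];
};

static void demo_relink(struct demo_client *c, struct demo_buffer *b,
			int dirty)
{
	b->dirty = dirty;
	/* list_move() unlinks from the old state list and relinks
	 * at the head of the new one in a single call */
	list_move(&b->lru_list, &c->lru[dirty]);
}

static struct demo_buffer *demo_oldest_clean(struct demo_client *c)
{
	struct demo_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[DEMO_LIST_CLEAN], lru_list)
		return b;	/* first hit in reverse order is the oldest;
				 * dm-bufio keeps scanning until it finds an
				 * evictable buffer */
	return NULL;
}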
D | raid5-cache.c |
    1366  BUG_ON(list_empty(&sh->lru)); in r5c_flush_stripe()
    1377  list_del_init(&sh->lru); in r5c_flush_stripe()
    1407  list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { in r5c_flush_cache()
    1415  &conf->r5c_partial_stripe_list, lru) { in r5c_flush_cache()
    1479  if (!list_empty(&sh->lru) && in r5c_do_reclaim()
    1948  list_for_each_entry(sh, list, lru) in r5c_recovery_lookup_stripe()
    1960  list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { in r5c_recovery_drop_stripes()
    1962  list_del_init(&sh->lru); in r5c_recovery_drop_stripes()
    1973  list_for_each_entry_safe(sh, next, cached_stripe_list, lru) in r5c_recovery_replay_stripes()
    1976  list_del_init(&sh->lru); in r5c_recovery_replay_stripes()
    [all …]
|
D | raid5.c |
    179   if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
    183   list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
    185   list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
    219   BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
    244   list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
    247   list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
    253   list_add_tail(&sh->lru, in do_release_stripe()
    256   list_add_tail(&sh->lru, in do_release_stripe()
    273   list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
    277   list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
    [all …]
|
D | raid5.h | 201 struct list_head lru; /* inactive_list or handle_list */ member
|
/drivers/gpu/drm/ttm/ |
D | ttm_bo.c |
    76    list_del_init(&bo->lru); in ttm_bo_del_from_lru()
    109   list_move_tail(&bo->lru, &man->lru[bo->priority]); in ttm_bo_move_to_lru_tail()
    143   list_bulk_move_tail(&man->lru[i], &pos->first->lru, in ttm_bo_bulk_move_lru_tail()
    144   &pos->last->lru); in ttm_bo_bulk_move_lru_tail()
    158   list_bulk_move_tail(&man->lru[i], &pos->first->lru, in ttm_bo_bulk_move_lru_tail()
    159   &pos->last->lru); in ttm_bo_bulk_move_lru_tail()
    687   list_for_each_entry(bo, &man->lru[i], lru) { in ttm_mem_evict_first()
    707   if (&bo->lru != &man->lru[i]) in ttm_mem_evict_first()
    1025  INIT_LIST_HEAD(&bo->lru); in ttm_bo_init_reserved()
|
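TTM refines the same idea with one LRU list per priority and, for bulk users such as per-VM buffer objects, re-tails a whole tracked [first, last] range in O(1) with list_bulk_move_tail() instead of relinking node by node. A reduced sketch of those two moves (types cut down to the relevant fields; all demo names are illustrative):

/* Kernel-context sketch: priority-banded LRU with O(1) bulk re-tailing. */
#include <linux/list.h>

#define DEMO_PRIO_LEVELS 4

struct demo_bo {
	struct list_head lru;
	unsigned int priority;
};

struct demo_manager {
	struct list_head lru[DEMO_PRIO_LEVELS];
};

static void demo_move_to_lru_tail(struct demo_manager *man,
				  struct demo_bo *bo)
{
	list_move_tail(&bo->lru, &man->lru[bo->priority]);
}

/* splice an entire [first, last] chain to the tail in one operation */
static void demo_bulk_move_tail(struct demo_manager *man, unsigned int prio,
				struct demo_bo *first, struct demo_bo *last)
{
	list_bulk_move_tail(&man->lru[prio], &first->lru, &last->lru);
}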
D | ttm_device.c |
    157   list_for_each_entry(bo, &man->lru[j], lru) { in ttm_device_swapout()
    252   if (list_empty(&man->lru[0])) in ttm_device_fini()
|
D | ttm_pool.c |
    235   list_add(&p->lru, &pt->pages); in ttm_pool_type_give()
    246   p = list_first_entry_or_null(&pt->pages, typeof(*p), lru); in ttm_pool_type_take()
    249   list_del(&p->lru); in ttm_pool_type_take()
    622   list_for_each_entry(p, &pt->pages, lru) in ttm_pool_type_count()
|
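The pool here is deliberately not LRU: ttm_pool_type_give()/_take() push and pop at the head, so the most recently freed, and therefore most likely cache-hot, page is handed out first. A reduced sketch of that LIFO pair (the real code also clears pages and serializes with a lock):

/* Kernel-context sketch: LIFO page stack keyed on page->lru. */
#include <linux/list.h>
#include <linux/mm.h>

struct demo_pool_type {
	struct list_head pages;
};

static void demo_pool_give(struct demo_pool_type *pt, struct page *p)
{
	list_add(&p->lru, &pt->pages);	/* LIFO: push on top */
}

static struct page *demo_pool_take(struct demo_pool_type *pt)
{
	struct page *p;

	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p)
		list_del(&p->lru);	/* pop the cache-hot top page */
	return p;
}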
D | ttm_resource.c |
    87    INIT_LIST_HEAD(&man->lru[i]); in ttm_resource_manager_init()
    119   while (!list_empty(&man->lru[i])) { in ttm_resource_manager_evict_all()
|
/drivers/misc/ |
D | vmw_balloon.c |
    676   struct page, lru); in vmballoon_alloc_page_list()
    677   list_del(&page->lru); in vmballoon_alloc_page_list()
    691   list_add(&page->lru, &ctl->pages); in vmballoon_alloc_page_list()
    869   list_for_each_entry(page, &ctl->pages, lru) in vmballoon_lock()
    901   list_move(&page->lru, &ctl->refused_pages); in vmballoon_lock()
    926   list_for_each_entry_safe(page, tmp, page_list, lru) { in vmballoon_release_page_list()
    927   list_del(&page->lru); in vmballoon_release_page_list()
    1013  list_for_each_entry(page, pages, lru) { in vmballoon_enqueue_page_list()
    1058  list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { in vmballoon_dequeue_page_list()
    1061  list_move(&page->lru, pages); in vmballoon_dequeue_page_list()
    [all …]
|
/drivers/android/ |
D | binder_alloc.c |
    238   on_lru = list_lru_del(&binder_alloc_lru, &page->lru); in binder_update_page_range()
    258   INIT_LIST_HEAD(&page->lru); in binder_update_page_range()
    289   ret = list_lru_add(&binder_alloc_lru, &page->lru); in binder_update_page_range()
    853   &alloc->pages[i].lru); in binder_alloc_deferred_release()
    914   int lru = 0; in binder_alloc_print_pages() local
    927   else if (list_empty(&page->lru)) in binder_alloc_print_pages()
    930   lru++; in binder_alloc_print_pages()
    934   seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); in binder_alloc_print_pages()
    980   struct list_lru_one *lru, in binder_alloc_free_page() argument
    988   lru); in binder_alloc_free_page()
    [all …]
|
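binder is the one user in this listing that outsources its LRU to the kernel-wide list_lru facility, which is per-NUMA-node and memcg-aware and is designed to be drained by a shrinker through an isolate callback. A reduced sketch against the two-argument list_lru_add()/list_lru_del() form used in these hits (later kernels grew nid/memcg parameters and renamed this form; all demo_* names are illustrative, and demo_lru needs a list_lru_init() call at setup):

/* Kernel-context sketch: MM-integrated LRU drained via an isolate callback. */
#include <linux/list_lru.h>

static struct list_lru demo_lru;	/* list_lru_init(&demo_lru) at setup */

struct demo_page {
	struct list_head lru;
};

static void demo_page_idle(struct demo_page *p)
{
	/* list_lru_add() returns false if the item was already listed */
	list_lru_add(&demo_lru, &p->lru);
}

static enum lru_status demo_free_page(struct list_head *item,
				      struct list_lru_one *lru,
				      spinlock_t *lock, void *cb_arg)
{
	/* unlink under the lru lock, then release the backing page;
	 * binder returns LRU_REMOVED_RETRY because it drops the lock */
	list_lru_isolate(lru, item);
	/* ... free the page that embeds @item here ... */
	return LRU_REMOVED;
}

static unsigned long demo_shrink(unsigned long nr_to_scan)
{
	return list_lru_walk(&demo_lru, demo_free_page, NULL, nr_to_scan);
}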
D | binder_alloc_selftest.c |
    105   !list_empty(&alloc->pages[page_index].lru)) { in check_buffer_pages_allocated()
    147   if (list_empty(&alloc->pages[i].lru)) { in binder_selftest_free_buf()
    169   list_empty(&alloc->pages[i].lru) ? in binder_selftest_free_page()
|
D | binder_alloc.h |
    70    struct list_head lru; member
    124   struct list_lru_one *lru,
|
/drivers/dma-buf/heaps/ |
D | page_pool.c |
    53    list_add_tail(&page->lru, &pool->items[index]); in dmabuf_page_pool_add()
    67    page = list_first_entry_or_null(&pool->items[index], struct page, lru); in dmabuf_page_pool_remove()
    70    list_del(&page->lru); in dmabuf_page_pool_remove()
|
D | system_heap.c |
    428   list_add_tail(&page->lru, &pages); in system_heap_do_allocate()
    439   list_for_each_entry_safe(page, tmp_page, &pages, lru) { in system_heap_do_allocate()
    442   list_del(&page->lru); in system_heap_do_allocate()
    478   list_for_each_entry_safe(page, tmp_page, &pages, lru) in system_heap_do_allocate()
|
/drivers/staging/android/ |
D | ashmem.c |
    67    struct list_head lru; member
    158   list_add_tail(&range->lru, &ashmem_lru_list); in lru_add()
    171   list_del(&range->lru); in lru_del()
    491   list_first_entry(&ashmem_lru_list, typeof(*range), lru); in ashmem_shrink_scan()
|
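ashmem does the same reclaim by hand: unpinned ranges go on a plain global list via lru_add()/lru_del(), and the shrinker's scan callback pops from the head, which is the oldest entry since ranges are appended at the tail. A reduced sketch of the scan loop (demo_* names are placeholders; the real code purges the range's backing file and holds ashmem_mutex):

/* Kernel-context sketch: shrinker draining a plain driver-private LRU. */
#include <linux/list.h>
#include <linux/shrinker.h>

struct demo_range {
	struct list_head lru;
	unsigned long nr_pages;
};

static LIST_HEAD(demo_lru_list);

static unsigned long demo_shrink_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
	unsigned long freed = 0;

	while (!list_empty(&demo_lru_list) && freed < sc->nr_to_scan) {
		struct demo_range *range =
			list_first_entry(&demo_lru_list,
					 typeof(*range), lru);

		list_del(&range->lru);	/* cf. lru_del() in ashmem.c */
		/* ... purge the range's backing pages here ... */
		freed += range->nr_pages;
	}
	return freed;
}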
/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_validation.c |
    132   list_add_tail(&page->lru, &ctx->page_list); in vmw_validation_mem_alloc()
    155   list_for_each_entry_safe(entry, next, &ctx->page_list, lru) { in vmw_validation_mem_free()
    156   list_del_init(&entry->lru); in vmw_validation_mem_free()
|
/drivers/block/ |
D | xen-blkfront.c |
    418   struct page, lru); in get_indirect_grant()
    419   list_del(&indirect_page->lru); in get_indirect_grant()
    1226  list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkif_free_ring()
    1227  list_del(&indirect_page->lru); in blkif_free_ring()
    1514  list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkif_completion()
    2229  list_add(&indirect_page->lru, &rinfo->indirect_pages); in blkfront_setup_indirect()
    2269  list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { in blkfront_setup_indirect()
    2270  list_del(&indirect_page->lru); in blkfront_setup_indirect()
|
/drivers/gpu/drm/i915/ |
D | TODO.txt | 18 the bo and vm, plus some lru locks is all that needed. No complex rcu,
|
/drivers/virtio/ |
D | virtio_balloon.c |
    266   list_for_each_entry_safe(page, next, pages, lru) { in release_pages_balloon()
    270   list_del(&page->lru); in release_pages_balloon()
    294   list_add(&page->lru, &pages); in leak_balloon()
|
/drivers/md/bcache/ |
D | bcache.h | 286 struct list_head lru; member
|