Searched refs:new_pages (Results 1 – 7 of 7) sorted by relevance
/kernel/linux/linux-5.10/drivers/block/drbd/
  drbd_bitmap.c
      381  struct page **new_pages, *page;  in bm_realloc_pages() local
      397  new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);  in bm_realloc_pages()
      398  if (!new_pages) {  in bm_realloc_pages()
      399  new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);  in bm_realloc_pages()
      400  if (!new_pages)  in bm_realloc_pages()
      406  new_pages[i] = old_pages[i];  in bm_realloc_pages()
      410  bm_free_pages(new_pages + have, i - have);  in bm_realloc_pages()
      411  bm_vk_free(new_pages);  in bm_realloc_pages()
      417  new_pages[i] = page;  in bm_realloc_pages()
      421  new_pages[i] = old_pages[i];  in bm_realloc_pages()
      [all …]
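
bm_realloc_pages() above first tries kzalloc() with __GFP_NOWARN and only falls back to __vmalloc() when that fails, so small bitmaps avoid vmalloc overhead. Below is a minimal userspace sketch of that try-the-cheap-allocator-first pattern; calloc() and mmap() stand in for kzalloc()/__vmalloc(), and alloc_zeroed() is a made-up helper, not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Illustrative stand-in for the kzalloc()/__vmalloc() fallback in
 * bm_realloc_pages(): try the cheap allocator first, fall back to a
 * different one on failure, and remember which one won so the matching
 * free path can be used later. */
static void *alloc_zeroed(size_t bytes, int *used_mmap)
{
    void *p = calloc(1, bytes);   /* analogous to kzalloc(..., __GFP_NOWARN) */

    *used_mmap = 0;
    if (p)
        return p;

    p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); /* analogous to __vmalloc(..., __GFP_ZERO) */
    if (p == MAP_FAILED)
        return NULL;

    *used_mmap = 1;
    return p;
}

int main(void)
{
    int used_mmap;
    void *buf = alloc_zeroed(1 << 20, &used_mmap);

    if (!buf)
        return 1;
    printf("allocated via %s\n", used_mmap ? "mmap" : "calloc");
    if (used_mmap)
        munmap(buf, 1 << 20);
    else
        free(buf);
    return 0;
}
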
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
  ttm_page_alloc.c
      592  struct list_head new_pages;  in ttm_page_pool_fill_locked() local
      601  INIT_LIST_HEAD(&new_pages);  in ttm_page_pool_fill_locked()
      602  r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,  in ttm_page_pool_fill_locked()
      607  list_splice(&new_pages, &pool->list);  in ttm_page_pool_fill_locked()
      613  list_for_each_entry(p, &new_pages, lru) {  in ttm_page_pool_fill_locked()
      616  list_splice(&new_pages, &pool->list);  in ttm_page_pool_fill_locked()
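
ttm_page_pool_fill_locked() above builds freshly allocated pages on a local new_pages list and splices the whole batch into pool->list in one step, so the pool never exposes a half-built batch. A hedged userspace sketch of that stage-then-splice idea follows, with a hand-rolled singly linked list in place of the kernel's list_head/list_splice(); all names in it are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for list_head: a singly linked list of "pages" that is
 * built up privately and then spliced into a pool in one step, mirroring
 * the new_pages/list_splice() flow in ttm_page_pool_fill_locked(). */
struct fake_page {
    int id;
    struct fake_page *next;
};

struct pool {
    struct fake_page *head;
    unsigned int npages;
};

/* Build a private batch; on allocation failure return what we got so far. */
static struct fake_page *alloc_new_pages(unsigned int count)
{
    struct fake_page *batch = NULL;

    for (unsigned int i = 0; i < count; i++) {
        struct fake_page *p = malloc(sizeof(*p));

        if (!p)
            break;
        p->id = i;
        p->next = batch;
        batch = p;
    }
    return batch;
}

/* Splice the whole batch onto the pool list in one step. */
static void pool_splice(struct pool *pool, struct fake_page *batch)
{
    while (batch) {
        struct fake_page *next = batch->next;

        batch->next = pool->head;
        pool->head = batch;
        pool->npages++;
        batch = next;
    }
}

int main(void)
{
    struct pool pool = { .head = NULL, .npages = 0 };
    struct fake_page *batch = alloc_new_pages(8);

    pool_splice(&pool, batch);
    printf("pool now holds %u pages\n", pool.npages);

    while (pool.head) {
        struct fake_page *next = pool.head->next;
        free(pool.head);
        pool.head = next;
    }
    return 0;
}
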
/kernel/linux/linux-5.10/drivers/base/firmware_loader/
  main.c
      323  struct page **new_pages;  in fw_grow_paged_buf() local
      325  new_pages = kvmalloc_array(new_array_size, sizeof(void *),  in fw_grow_paged_buf()
      327  if (!new_pages)  in fw_grow_paged_buf()
      329  memcpy(new_pages, fw_priv->pages,  in fw_grow_paged_buf()
      331  memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *  in fw_grow_paged_buf()
      334  fw_priv->pages = new_pages;  in fw_grow_paged_buf()
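
fw_grow_paged_buf() above grows the firmware page-pointer array by allocating a larger array, copying the populated entries, zeroing the new tail, and swapping the pointer into fw_priv. A small userspace sketch of that grow-copy-zero-swap step, assuming calloc() in place of kvmalloc_array() and a made-up struct paged_buf:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative analogue of fw_grow_paged_buf(): grow an array of page
 * pointers by allocating a larger array, copying the populated slots,
 * keeping the new tail zeroed, and publishing the new array. */
struct paged_buf {
    void **pages;
    size_t page_array_size;
};

static int grow_paged_buf(struct paged_buf *buf, size_t new_array_size)
{
    void **new_pages;

    if (new_array_size <= buf->page_array_size)
        return 0;

    new_pages = calloc(new_array_size, sizeof(void *)); /* stands in for kvmalloc_array() */
    if (!new_pages)
        return -1;

    /* Copy the already-populated slots; calloc() left the tail zeroed. */
    if (buf->page_array_size)
        memcpy(new_pages, buf->pages, buf->page_array_size * sizeof(void *));

    free(buf->pages);
    buf->pages = new_pages;
    buf->page_array_size = new_array_size;
    return 0;
}

int main(void)
{
    struct paged_buf buf = { .pages = NULL, .page_array_size = 0 };

    if (grow_paged_buf(&buf, 4) || grow_paged_buf(&buf, 16))
        return 1;
    printf("page array now holds %zu slots\n", buf.page_array_size);
    free(buf.pages);
    return 0;
}

Because calloc() already zeroes the new slots, this sketch does not need the explicit memset() of the tail that the kernel path performs.
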
/kernel/linux/linux-5.10/drivers/virtio/
  virtio_mem.c
      263  int new_pages = PFN_UP(new_bytes);  in virtio_mem_mb_state_prepare_next_mb() local
      266  if (vm->mb_state && old_pages == new_pages)  in virtio_mem_mb_state_prepare_next_mb()
      269  new_mb_state = vzalloc(new_pages * PAGE_SIZE);  in virtio_mem_mb_state_prepare_next_mb()
      375  int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));  in virtio_mem_sb_bitmap_prepare_next_mb() local
      378  if (vm->sb_bitmap && old_pages == new_pages)  in virtio_mem_sb_bitmap_prepare_next_mb()
      381  new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);  in virtio_mem_sb_bitmap_prepare_next_mb()
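
Both virtio_mem helpers above round the required byte count up to whole pages with PFN_UP() and skip the vzalloc() entirely when the page count has not changed. Here is a userspace sketch of that round-up-and-compare check; PAGE_SIZE, pages_up() and prepare_metadata() are stand-ins, not the driver's own names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Userspace stand-in for PFN_UP(): round a byte count up to whole pages. */
static unsigned long pages_up(unsigned long bytes)
{
    return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
}

/* Analogue of virtio_mem_mb_state_prepare_next_mb(): only replace the
 * zeroed metadata buffer when the rounded-up page count actually changes. */
static int prepare_metadata(unsigned char **state, unsigned long old_bytes,
                            unsigned long new_bytes)
{
    unsigned long old_pages = pages_up(old_bytes);
    unsigned long new_pages = pages_up(new_bytes);
    unsigned char *new_state;

    if (*state && old_pages == new_pages)
        return 0;   /* still fits in the current allocation */

    new_state = calloc(new_pages, PAGE_SIZE); /* stands in for vzalloc() */
    if (!new_state)
        return -1;

    if (*state)
        memcpy(new_state, *state, old_pages * PAGE_SIZE);
    free(*state);
    *state = new_state;
    return 0;
}

int main(void)
{
    unsigned char *state = NULL;

    prepare_metadata(&state, 0, 100);      /* 1 page */
    prepare_metadata(&state, 100, 2000);   /* still 1 page: no reallocation */
    prepare_metadata(&state, 2000, 9000);  /* grows to 3 pages */
    printf("metadata spans %lu pages\n", pages_up(9000));
    free(state);
    return 0;
}
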
/kernel/linux/linux-5.10/arch/s390/kernel/
  debug.c
      1263  int rc, new_pages;  in debug_input_pages_fn() local
      1277  new_pages = debug_get_uint(str);  in debug_input_pages_fn()
      1278  if (new_pages < 0) {  in debug_input_pages_fn()
      1282  rc = debug_set_size(id, id->nr_areas, new_pages);  in debug_input_pages_fn()
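
debug_input_pages_fn() above parses the page count written by the user, rejects anything debug_get_uint() flags as invalid, and only then calls debug_set_size(). A minimal userspace sketch of that parse-validate-apply flow using strtol(); set_size() is a placeholder for the real resize call:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder for debug_set_size(): just report what would be resized. */
static int set_size(int nr_areas, int new_pages)
{
    printf("resizing %d areas to %d pages each\n", nr_areas, new_pages);
    return 0;
}

/* Analogue of debug_input_pages_fn(): parse the user-supplied page count
 * and reject negative or malformed input before applying it. */
static int input_pages(const char *str, int nr_areas)
{
    char *end;
    long val;

    errno = 0;
    val = strtol(str, &end, 10);
    if (errno || end == str || val < 0 || val > INT_MAX)
        return -EINVAL;

    return set_size(nr_areas, (int)val);
}

int main(void)
{
    input_pages("16", 4);   /* ok */
    input_pages("-3", 4);   /* rejected, like debug_get_uint() returning < 0 */
    return 0;
}
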
/kernel/linux/linux-5.10/kernel/trace/
  ring_buffer.c
      529  struct list_head new_pages; /* new pages to add */  member
      1569  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in rb_allocate_cpu_buffer()
      1855  struct list_head *pages = &cpu_buffer->new_pages;  in rb_insert_pages()
      1920  list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,  in rb_insert_pages()
      2020  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in ring_buffer_resize()
      2022  &cpu_buffer->new_pages, cpu)) {  in ring_buffer_resize()
      2085  INIT_LIST_HEAD(&cpu_buffer->new_pages);  in ring_buffer_resize()
      2088  &cpu_buffer->new_pages, cpu_id)) {  in ring_buffer_resize()
      2142  if (list_empty(&cpu_buffer->new_pages))  in ring_buffer_resize()
      2145  list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,  in ring_buffer_resize()
      [all …]
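
ring_buffer_resize() above stages freshly allocated buffer pages on the per-CPU new_pages list, and rb_insert_pages() later moves them into the live ring; list_for_each_entry_safe() lets the error path free pages while still walking the list. A rough userspace sketch of that stage-or-tear-down flow with a plain singly linked list (the bpage struct and helpers here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the buffer_page list: new pages are staged on
 * a private new_pages list and either inserted into the live ring or freed.
 * The teardown loop mirrors the list_for_each_entry_safe() idea: grab the
 * next pointer before freeing the current node so the walk survives. */
struct bpage {
    int id;
    struct bpage *next;
};

static struct bpage *stage_new_pages(int count)
{
    struct bpage *new_pages = NULL;

    for (int i = 0; i < count; i++) {
        struct bpage *p = malloc(sizeof(*p));

        if (!p)
            break;      /* partial batch is fine; caller cleans up */
        p->id = i;
        p->next = new_pages;
        new_pages = p;
    }
    return new_pages;
}

/* Error-path cleanup: free every staged page, safe against node removal. */
static void free_new_pages(struct bpage *new_pages)
{
    struct bpage *p = new_pages;

    while (p) {
        struct bpage *tmp = p->next;    /* remember next before freeing */

        free(p);
        p = tmp;
    }
}

int main(void)
{
    struct bpage *new_pages = stage_new_pages(4);

    printf("staged pages%s\n", new_pages ? "" : " (none)");
    free_new_pages(new_pages);  /* pretend the resize failed */
    return 0;
}
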
/kernel/linux/linux-5.10/fs/
  io_uring.c
      8009  unsigned long page_limit, cur_pages, new_pages;  in __io_account_mem() local
      8016  new_pages = cur_pages + nr_pages;  in __io_account_mem()
      8017  if (new_pages > page_limit)  in __io_account_mem()
      8020  new_pages) != cur_pages);  in __io_account_mem()
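
__io_account_mem() above charges pinned pages against the rlimit with a compare-and-swap loop: read cur_pages, compute new_pages = cur_pages + nr_pages, give up if the limit would be exceeded, and retry if the counter changed underneath. A userspace sketch of the same lock-free accounting loop using C11 atomics; the page limit value is arbitrary:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogue of __io_account_mem(): optimistically compute the new pinned
 * page count and publish it with compare-and-swap, retrying if another
 * thread updated the counter in the meantime. */
static atomic_ulong locked_pages;

static bool account_mem(unsigned long nr_pages, unsigned long page_limit)
{
    unsigned long cur_pages, new_pages;

    do {
        cur_pages = atomic_load(&locked_pages);
        new_pages = cur_pages + nr_pages;
        if (new_pages > page_limit)
            return false;   /* would exceed the RLIMIT_MEMLOCK-style cap */
    } while (!atomic_compare_exchange_weak(&locked_pages, &cur_pages, new_pages));

    return true;
}

int main(void)
{
    const unsigned long limit = 1024;   /* arbitrary page limit for the sketch */

    printf("first charge: %s\n", account_mem(512, limit) ? "ok" : "rejected");
    printf("second charge: %s\n", account_mem(768, limit) ? "ok" : "rejected");
    return 0;
}

The weak compare-exchange may fail spuriously, which is harmless here because the loop simply rereads the counter and retries, matching the retry semantics of the kernel's cmpxchg loop.
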