| /kernel/linux/linux-6.6/drivers/iommu/iommufd/ |
| D | pages.c |
    69:  * allocation can hold about 26M of 4k pages and 13G of 2M pages in an
    163: static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
    167: rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
    169: WARN_ON(rc || pages->npinned > pages->npages);
    172: static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
    176: rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
    178: WARN_ON(rc || pages->npinned > pages->npages);
    181: static void iopt_pages_err_unpin(struct iopt_pages *pages,
    189: iopt_pages_sub_npinned(pages, npages);
    195:  * covers a portion of the first and last pages in the range.
    [all …]
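
The iopt_pages_{add,sub}_npinned() excerpts above rely on check_add_overflow()/check_sub_overflow() so a broken pin count is caught by WARN_ON() instead of wrapping silently. A condensed sketch of that accounting pattern (struct pin_counter and pin_counter_add() are hypothetical names, not part of iommufd):

#include <linux/bug.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* hypothetical stand-in for the iopt_pages counters */
struct pin_counter {
        size_t npinned;
        size_t npages;          /* upper bound: pages in the object */
};

static void pin_counter_add(struct pin_counter *c, size_t npages)
{
        bool overflow = check_add_overflow(c->npinned, npages, &c->npinned);

        /* accounting bug: the count wrapped or exceeded the object size */
        WARN_ON(overflow || c->npinned > c->npages);
}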
|
| /kernel/linux/linux-6.6/Documentation/admin-guide/mm/ |
| D | hugetlbpage.rst |
    2: HugeTLB Pages
    28: persistent hugetlb pages in the kernel's huge page pool. It also displays
    30: and surplus huge pages in the pool of huge pages of default size.
    46: is the size of the pool of huge pages.
    48: is the number of huge pages in the pool that are not yet
    51: is short for "reserved," and is the number of huge pages for
    53: but no allocation has yet been made. Reserved huge pages
    55: huge page from the pool of huge pages at fault time.
    57: is short for "surplus," and is the number of huge pages in
    59: maximum number of surplus huge pages is controlled by
    [all …]
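
The fields excerpted above (pool size, free, reserved, surplus) are exported for the default huge page size through /proc/meminfo as HugePages_Total, HugePages_Free, HugePages_Rsvd and HugePages_Surp. A minimal userspace sketch (not part of the kernel tree) that prints those counters:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* HugePages_Total/Free/Rsvd/Surp and the default huge page size */
                if (!strncmp(line, "HugePages_", 10) ||
                    !strncmp(line, "Hugepagesize", 12))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}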
|
| D | zswap.rst |
    8: Zswap is a lightweight compressed cache for swap pages. It takes pages that are
    26: Zswap evicts pages from compressed cache on an LRU basis to the backing swap
    40: When zswap is disabled at runtime it will stop storing pages that are
    42: back into memory all of the pages stored in the compressed pool. The
    43: pages stored in zswap will remain in the compressed pool until they are
    45: pages out of the compressed pool, a swapoff on the swap device(s) will
    46: fault back into memory all swapped out pages, including those in the
    52: Zswap receives pages for compression from the swap subsystem and is able to
    53: evict pages from its own compressed pool on an LRU basis and write them back to
    60: pages are freed. The pool is not preallocated. By default, a zpool
    [all …]
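
The excerpt describes what happens when zswap is toggled at runtime. A minimal userspace sketch of that toggle, assuming a kernel built with CONFIG_ZSWAP and root privileges; it writes the zswap `enabled` module parameter:

#include <stdio.h>

int main(void)
{
        const char *param = "/sys/module/zswap/parameters/enabled";
        FILE *f = fopen(param, "w");

        if (!f) {
                perror(param);
                return 1;
        }
        fputs("N\n", f);        /* write "Y" to turn it back on */
        return fclose(f) ? 1 : 0;
}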
|
| D | concepts.rst |
    41: The physical system memory is divided into page frames, or pages. The
    48: pages. These mappings are described by page tables that allow
    53: addresses of actual pages used by the software. The tables at higher
    54: levels contain physical addresses of the pages belonging to the lower
    64: Huge Pages
    75: Many modern CPU architectures allow mapping of the memory pages
    77: it is possible to map 2M and even 1G pages using entries in the second
    78: and the third level page tables. In Linux such pages are called
    79: `huge`. Usage of huge pages significantly reduces pressure on TLB,
    83: memory with the huge pages. The first one is `HugeTLB filesystem`, or
    [all …]
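
Of the two huge-page interfaces the excerpt mentions, the HugeTLB filesystem can also be reached from an ordinary program through mmap() with MAP_HUGETLB. A minimal userspace sketch; it needs huge pages reserved in the pool (e.g. via /proc/sys/vm/nr_hugepages) and assumes the 2M default size:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)         /* one 2M huge page (x86-64 default) */

int main(void)
{
        void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");
                return 1;
        }
        memset(p, 0, LEN);              /* fault the huge page in */
        munmap(p, LEN);
        return 0;
}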
|
| /kernel/linux/linux-5.10/mm/ |
| D | percpu-vm.c |
    22:  * pcpu_get_pages - get temp pages array
    29:  * Pointer to temp pages array on success.
    33: static struct page **pages;
    34: size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
    38: if (!pages)
    39: pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
    40: return pages;
    44:  * pcpu_free_pages - free pages which were allocated for @chunk
    45:  * @chunk: chunk pages were allocated for
    46:  * @pages: array of pages to be freed, indexed by pcpu_page_idx()
    [all …]
|
| D | balloon_compaction.c |
    5:  * Common interface for making balloon pages movable by compaction.
    30:  * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
    33:  * @pages: pages to enqueue - allocated using balloon_page_alloc.
    35:  * Driver must call this function to properly enqueue balloon pages before
    38:  * Return: number of pages that were enqueued.
    41: struct list_head *pages)
    48: list_for_each_entry_safe(page, tmp, pages, lru) {
    59:  * balloon_page_list_dequeue() - removes pages from balloon's page list and
    60:  * returns a list of the pages.
    62:  * @pages: pointer to the list of pages that would be returned to the caller.
    [all …]
|
| D | gup.c |
    83:  * So now that the head page is stable, recheck that the pages still
    234:  * Pages that were pinned via pin_user_pages*() must be released via either
    236:  * that such pages can be separately tracked and uniquely handled. In
    246:  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
    247:  * @pages: array of pages to be maybe marked dirty, and definitely released.
    248:  * @npages: number of pages in the @pages array.
    249:  * @make_dirty: whether to mark the pages dirty
    254:  * For each page in the @pages array, make that page (or its head page, if a
    256:  * listed as clean. In any case, releases all pages using unpin_user_page(),
    267: void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
    [all …]
|
| /kernel/linux/linux-5.10/Documentation/admin-guide/mm/ |
| D | hugetlbpage.rst |
    4: HugeTLB Pages
    30: persistent hugetlb pages in the kernel's huge page pool. It also displays
    32: and surplus huge pages in the pool of huge pages of default size.
    48: is the size of the pool of huge pages.
    50: is the number of huge pages in the pool that are not yet
    53: is short for "reserved," and is the number of huge pages for
    55: but no allocation has yet been made. Reserved huge pages
    57: huge page from the pool of huge pages at fault time.
    59: is short for "surplus," and is the number of huge pages in
    61: maximum number of surplus huge pages is controlled by
    [all …]
|
| /kernel/linux/linux-6.6/mm/ |
| D | percpu-vm.c |
    23:  * pcpu_get_pages - get temp pages array
    30:  * Pointer to temp pages array on success.
    34: static struct page **pages;
    35: size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
    39: if (!pages)
    40: pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
    41: return pages;
    45:  * pcpu_free_pages - free pages which were allocated for @chunk
    46:  * @chunk: chunk pages were allocated for
    47:  * @pages: array of pages to be freed, indexed by pcpu_page_idx()
    [all …]
|
| D | balloon_compaction.c |
    5:  * Common interface for making balloon pages movable by compaction.
    30:  * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
    33:  * @pages: pages to enqueue - allocated using balloon_page_alloc.
    35:  * Driver must call this function to properly enqueue balloon pages before
    38:  * Return: number of pages that were enqueued.
    41: struct list_head *pages)
    48: list_for_each_entry_safe(page, tmp, pages, lru) {
    59:  * balloon_page_list_dequeue() - removes pages from balloon's page list and
    60:  * returns a list of the pages.
    62:  * @pages: pointer to the list of pages that would be returned to the caller.
    [all …]
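
The kernel-doc above says drivers allocate balloon pages with balloon_page_alloc() and then batch them into balloon_page_list_enqueue(), which returns how many pages were enqueued. A condensed kernel-side sketch of that flow (my_balloon and my_balloon_inflate() are hypothetical driver names; error handling, locking and the balloon_devinfo_init() call at probe time are omitted):

#include <linux/balloon_compaction.h>
#include <linux/list.h>

/* hypothetical device state; balloon_devinfo_init() runs at probe time */
static struct balloon_dev_info my_balloon;

static size_t my_balloon_inflate(size_t nr)
{
        LIST_HEAD(pages);
        size_t i;

        for (i = 0; i < nr; i++) {
                struct page *page = balloon_page_alloc();

                if (!page)
                        break;
                /* collect the freshly allocated balloon pages on a local list */
                list_add(&page->lru, &pages);
        }

        /* hand the whole batch to the balloon core; returns pages enqueued */
        return balloon_page_list_enqueue(&my_balloon, &pages);
}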
|
| D | gup.c |
    33: static inline void sanity_check_pinned_pages(struct page **pages,
    40:  * We only pin anonymous pages if they are exclusive. Once pinned, we
    44:  * We'd like to verify that our pinned anonymous pages are still mapped
    51: for (; npages; npages--, pages++) {
    52: struct page *page = *pages;
    179:  * Pages that were pinned via pin_user_pages*() must be released via either
    181:  * that such pages can be separately tracked and uniquely handled. In
    249:  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
    250:  * @pages: array of pages to be maybe marked dirty, and definitely released.
    251:  * @npages: number of pages in the @pages array.
    [all …]
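
The kernel-doc above pairs the pin_user_pages*() family with unpin_user_pages_dirty_lock(), which drops the pins and optionally dirties the pages. A condensed kernel-side sketch of that pairing (dma_to_user_buffer() is a hypothetical caller, not a function in gup.c):

#include <linux/mm.h>
#include <linux/slab.h>

/* hypothetical helper: DMA into a user buffer starting at @uaddr */
static int dma_to_user_buffer(unsigned long uaddr, int npages)
{
        struct page **pages;
        long pinned;

        pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, npages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned < 0) {
                kvfree(pages);
                return pinned;
        }
        /* a real caller must also cope with a short pin (pinned < npages) */

        /* ... program the device to DMA into the pinned pages ... */

        /* drop the pins; make_dirty = true because the device wrote to them */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kvfree(pages);
        return 0;
}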
|
| D | gup_test.c |
    10: static void put_back_pages(unsigned int cmd, struct page **pages,
    19: put_page(pages[i]);
    25: unpin_user_pages(pages, nr_pages);
    29: unpin_user_pages(pages, nr_pages);
    32: put_page(pages[i]);
    39: static void verify_dma_pinned(unsigned int cmd, struct page **pages,
    50: folio = page_folio(pages[i]);
    53: "pages[%lu] is NOT dma-pinned\n", i)) {
    59: "pages[%lu] is NOT pinnable but pinned\n",
    69: static void dump_pages_test(struct gup_test *gup, struct page **pages,
    [all …]
|
| /kernel/linux/linux-5.10/net/ceph/ |
| D | pagevec.c |
    13: void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
    19: set_page_dirty_lock(pages[i]);
    20: put_page(pages[i]);
    22: kvfree(pages);
    26: void ceph_release_page_vector(struct page **pages, int num_pages)
    31: __free_pages(pages[i], 0);
    32: kfree(pages);
    37:  * allocate a vector new pages
    41: struct page **pages;
    44: pages = kmalloc_array(num_pages, sizeof(*pages), flags);
    [all …]
|
| /kernel/linux/linux-6.6/net/ceph/ |
| D | pagevec.c |
    13: void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
    19: set_page_dirty_lock(pages[i]);
    20: put_page(pages[i]);
    22: kvfree(pages);
    26: void ceph_release_page_vector(struct page **pages, int num_pages)
    31: __free_pages(pages[i], 0);
    32: kfree(pages);
    37:  * allocate a vector new pages
    41: struct page **pages;
    44: pages = kmalloc_array(num_pages, sizeof(*pages), flags);
    [all …]
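
The excerpts show the ceph page-vector helpers: ceph_alloc_page_vector() builds an array of freshly allocated pages and ceph_release_page_vector() frees both the pages and the array. A condensed kernel-side sketch of that lifetime (fill_and_release() is a hypothetical caller):

#include <linux/ceph/libceph.h>
#include <linux/err.h>

/* hypothetical helper showing the vector's lifetime */
static int fill_and_release(int num_pages)
{
        struct page **pages;

        pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* ... attach the vector to an OSD request and fill it with data ... */

        /* frees each page with __free_pages() and then the array itself */
        ceph_release_page_vector(pages, num_pages);
        return 0;
}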
|
| /kernel/linux/linux-5.10/Documentation/vm/ |
| D | unevictable-lru.rst |
    15: pages.
    30: pages and to hide these pages from vmscan. This mechanism is based on a patch
    36: main memory will have over 32 million 4k pages in a single zone. When a large
    37: fraction of these pages are not evictable for any reason [see below], vmscan
    39: of pages that are evictable. This can result in a situation where all CPUs are
    43: The unevictable list addresses the following classes of unevictable pages:
    51: The infrastructure may also be able to handle other conditions that make pages
    66: The Unevictable LRU infrastructure maintains unevictable pages on an additional
    69: (1) We get to "treat unevictable pages just like we treat other pages in the
    74: (2) We want to be able to migrate unevictable pages between nodes for memory
    [all …]
|
| D | zswap.rst |
    10: Zswap is a lightweight compressed cache for swap pages. It takes pages that are
    34: Zswap evicts pages from compressed cache on an LRU basis to the backing swap
    48: When zswap is disabled at runtime it will stop storing pages that are
    50: back into memory all of the pages stored in the compressed pool. The
    51: pages stored in zswap will remain in the compressed pool until they are
    53: pages out of the compressed pool, a swapoff on the swap device(s) will
    54: fault back into memory all swapped out pages, including those in the
    60: Zswap receives pages for compression through the Frontswap API and is able to
    61: evict pages from its own compressed pool on an LRU basis and write them back to
    68: pages are freed. The pool is not preallocated. By default, a zpool
    [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/ttm/ |
| D | ttm_page_alloc.c |
    29:  * - Pool collects resently freed pages for reuse
    31:  * - doesn't track currently in use pages
    59:  * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
    65:  * @list: Pool of free uc/wc pages for fast reuse.
    67:  * @npages: Number of pages in pool.
    100:  * @free_interval: minimum number of jiffies between freeing pages from pool.
    103:  * some pages to free.
    104:  * @small_allocation: Limit in number of pages what is small allocation.
    164: /* Convert kb to number of pages */
    246: /* set memory back to wb and free the pages. */
    [all …]
|
| /kernel/linux/linux-5.10/include/xen/ |
| D | xen-ops.h |
    67: unsigned int domid, bool no_translate, struct page **pages);
    72: bool no_translate, struct page **pages)
    87: struct page **pages);
    89: int nr, struct page **pages);
    100: struct page **pages)
    106: int nr, struct page **pages)
    117:  * @vma: VMA to map the pages into
    118:  * @addr: Address at which to map the pages
    123:  * @domid: Domain owning the pages
    124:  * @pages: Array of pages if this domain has an auto-translated physmap
    [all …]
|
| /kernel/linux/linux-5.10/fs/isofs/ |
| D | compress.c |
    37:  * to one zisofs block. Store the data in the @pages array with @pcount
    42: struct page **pages, unsigned poffset,
    68: if (!pages[i])
    70: memset(page_address(pages[i]), 0, PAGE_SIZE);
    71: flush_dcache_page(pages[i]);
    72: SetPageUptodate(pages[i]);
    122: if (pages[curpage]) {
    123: stream.next_out = page_address(pages[curpage])
    175: if (pages[curpage]) {
    176: flush_dcache_page(pages[curpage]);
    [all …]
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/ |
| D | huge_gem_object.c |
    12: struct sg_table *pages)
    18: for_each_sgt_page(page, sgt_iter, pages) {
    24: sg_free_table(pages);
    25: kfree(pages);
    34: struct sg_table *pages;
    41: pages = kmalloc(sizeof(*pages), GFP);
    42: if (!pages)
    45: if (sg_alloc_table(pages, npages, GFP)) {
    46: kfree(pages);
    50: sg = pages->sgl;
    [all …]
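
huge_get_pages() above allocates an sg_table, sizes it with sg_alloc_table(), and walks it starting from pages->sgl; huge_free_pages() undoes this with sg_free_table() and kfree(). A condensed kernel-side sketch of the same pattern for an existing page array (build_sgt() is a hypothetical helper; the real selftest allocates its pages differently):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* hypothetical helper: wrap an existing page array in an sg_table */
static struct sg_table *build_sgt(struct page **pages, unsigned int npages)
{
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned int i;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
                kfree(sgt);
                return ERR_PTR(-ENOMEM);
        }

        /* one sg entry per page, each covering a whole page */
        for_each_sg(sgt->sgl, sg, npages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* teardown mirrors huge_free_pages(): sg_free_table(sgt); kfree(sgt); */
        return sgt;
}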
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/ |
| D | huge_gem_object.c |
    12: struct sg_table *pages)
    18: for_each_sgt_page(page, sgt_iter, pages) {
    24: sg_free_table(pages);
    25: kfree(pages);
    34: struct sg_table *pages;
    37: pages = kmalloc(sizeof(*pages), GFP);
    38: if (!pages)
    41: if (sg_alloc_table(pages, npages, GFP)) {
    42: kfree(pages);
    46: sg = pages->sgl;
    [all …]
|
| /kernel/linux/linux-6.6/Documentation/mm/ |
| D | unevictable-lru.rst |
    34: main memory will have over 32 million 4k pages in a single node. When a large
    35: fraction of these pages are not evictable for any reason [see below], vmscan
    37: of pages that are evictable. This can result in a situation where all CPUs are
    41: The unevictable list addresses the following classes of unevictable pages:
    51: The infrastructure may also be able to handle other conditions that make pages
    83: lists (or "Movable" pages: outside of consideration here). If we were to
    104: lru_list enum element). The memory controller tracks the movement of pages to
    108: not attempt to reclaim pages on the unevictable list. This has a couple of
    111: (1) Because the pages are "hidden" from reclaim on the unevictable list, the
    112: reclaim process can be more efficient, dealing only with pages that have a
    [all …]
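
mlock()ed memory is one of the classes of unevictable pages this document covers; once locked, the pages are counted under "Unevictable:" and "Mlocked:" in /proc/meminfo. A minimal userspace sketch (it may hit RLIMIT_MEMLOCK with default limits):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 64 * (size_t)getpagesize();
        void *buf = malloc(len);

        if (!buf)
                return 1;
        memset(buf, 0, len);            /* fault the pages in */
        if (mlock(buf, len)) {          /* pin them: now unevictable */
                perror("mlock");
                return 1;
        }
        pause();                        /* inspect /proc/meminfo meanwhile */
        return 0;
}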
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/vkms/ |
| D | vkms_gem.c |
    37: WARN_ON(gem->pages);
    61: if (obj->pages) {
    62: get_page(obj->pages[page_offset]);
    63: vmf->page = obj->pages[page_offset];
    155: if (!vkms_obj->pages) {
    156: struct page **pages = drm_gem_get_pages(gem_obj);
    158: if (IS_ERR(pages))
    159: return pages;
    161: if (cmpxchg(&vkms_obj->pages, NULL, pages))
    162: drm_gem_put_pages(gem_obj, pages, false, true);
    [all …]
|
| /kernel/linux/linux-6.6/fs/isofs/ |
| D | compress.c |
    37:  * to one zisofs block. Store the data in the @pages array with @pcount
    42: struct page **pages, unsigned poffset,
    68: if (!pages[i])
    70: memzero_page(pages[i], 0, PAGE_SIZE);
    71: SetPageUptodate(pages[i]);
    121: if (pages[curpage]) {
    122: stream.next_out = kmap_local_page(pages[curpage])
    174: if (pages[curpage]) {
    175: flush_dcache_page(pages[curpage]);
    176: SetPageUptodate(pages[curpage]);
    [all …]
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/xen/ |
| D | xen_drm_front_gem.c |
    30: struct page **pages;
    49: xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
    51: return !xen_obj->pages ? -ENOMEM : 0;
    56: kvfree(xen_obj->pages);
    57: xen_obj->pages = NULL;
    93:  * only allocate array of pointers to pages
    100:  * allocate ballooned pages which will be used to map
    104: xen_obj->pages);
    106: DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
    116:  * need to allocate backing pages now, so we can share those
    [all …]
|