/fs/isofs/
D | compress.c
      42  struct page **pages, unsigned poffset,    in zisofs_uncompress_block()  (argument)
      68  if (!pages[i])    in zisofs_uncompress_block()
      70  memset(page_address(pages[i]), 0, PAGE_SIZE);    in zisofs_uncompress_block()
      71  flush_dcache_page(pages[i]);    in zisofs_uncompress_block()
      72  SetPageUptodate(pages[i]);    in zisofs_uncompress_block()
     122  if (pages[curpage]) {    in zisofs_uncompress_block()
     123  stream.next_out = page_address(pages[curpage])    in zisofs_uncompress_block()
     175  if (pages[curpage]) {    in zisofs_uncompress_block()
     176  flush_dcache_page(pages[curpage]);    in zisofs_uncompress_block()
     177  SetPageUptodate(pages[curpage]);    in zisofs_uncompress_block()
    [all …]
/fs/ramfs/
D | file-nommu.c
      65  struct page *pages;    in ramfs_nommu_expand_for_mapping()  (local)
      84  pages = alloc_pages(gfp, order);    in ramfs_nommu_expand_for_mapping()
      85  if (!pages)    in ramfs_nommu_expand_for_mapping()
      92  split_page(pages, order);    in ramfs_nommu_expand_for_mapping()
      96  __free_page(pages + loop);    in ramfs_nommu_expand_for_mapping()
     100  data = page_address(pages);    in ramfs_nommu_expand_for_mapping()
     105  struct page *page = pages + loop;    in ramfs_nommu_expand_for_mapping()
     124  __free_page(pages + loop++);    in ramfs_nommu_expand_for_mapping()
     207  struct page **pages = NULL, **ptr, *page;    in ramfs_nommu_get_unmapped_area()  (local)
     223  pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);    in ramfs_nommu_get_unmapped_area()
    [all …]
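The ramfs_nommu_expand_for_mapping() hits above show the usual way to carve an exact number of physically contiguous pages out of a higher-order allocation: alloc_pages() a 2^order block, split_page() it into independent pages, then __free_page() the tail that is not needed. A minimal sketch of that pattern; the helper name and its bounds are illustrative, only the alloc_pages()/split_page()/__free_page() calls are the APIs the listing shows.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_exact_pages(gfp_t gfp, unsigned int npages)
    {
            unsigned int order = get_order((unsigned long)npages << PAGE_SHIFT);
            struct page *pages = alloc_pages(gfp, order);
            unsigned int i;

            if (!pages)
                    return NULL;

            /* Turn the high-order block into 2^order independent pages. */
            split_page(pages, order);

            /* Give back the tail pages beyond what the caller asked for. */
            for (i = npages; i < (1U << order); i++)
                    __free_page(pages + i);

            return pages;
    }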
/fs/squashfs/
D | page_actor.c
      30  if (actor->next_page == actor->pages)    in cache_next_page()
      42  int pages, int length)    in squashfs_page_actor_init()  (argument)
      49  actor->length = length ? : pages * PAGE_SIZE;    in squashfs_page_actor_init()
      51  actor->pages = pages;    in squashfs_page_actor_init()
      71  return actor->pageaddr = actor->next_page == actor->pages ? NULL :    in direct_next_page()
      82  int pages, int length)    in squashfs_page_actor_init_special()  (argument)
      89  actor->length = length ? : pages * PAGE_SIZE;    in squashfs_page_actor_init_special()
      91  actor->pages = pages;    in squashfs_page_actor_init_special()
D | file_direct.c
      22  int pages, struct page **page, int bytes);
      36  int i, n, pages, missing_pages, bytes, res = -ENOMEM;    in squashfs_readpage_block()  (local)
      44  pages = end_index - start_index + 1;    in squashfs_readpage_block()
      46  page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);    in squashfs_readpage_block()
      54  actor = squashfs_page_actor_init_special(page, pages, 0);    in squashfs_readpage_block()
      59  for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {    in squashfs_readpage_block()
      84  res = squashfs_read_cache(target_page, block, bsize, pages,    in squashfs_readpage_block()
     105  pageaddr = kmap_atomic(page[pages - 1]);    in squashfs_readpage_block()
     111  for (i = 0; i < pages; i++) {    in squashfs_readpage_block()
     128  for (i = 0; i < pages; i++) {    in squashfs_readpage_block()
    [all …]
D | page_actor.h
      12  int pages;    (member)
      18  int pages, int length)    in squashfs_page_actor_init()  (argument)
      25  actor->length = length ? : pages * PAGE_SIZE;    in squashfs_page_actor_init()
      27  actor->pages = pages;    in squashfs_page_actor_init()
      40  return actor->next_page == actor->pages ? NULL :    in squashfs_next_page()
      58  int pages;    (member)
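The squashfs "page actor" seen in page_actor.c/page_actor.h above is essentially an iterator: it records a destination page array and total output length (defaulting to pages * PAGE_SIZE) and hands the decompressor one page's address at a time until the array is exhausted. A simplified, hypothetical version of that idea follows; the struct and function names are illustrative, not squashfs's exact API or field layout.

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct page_iter {
            struct page **page;     /* destination pages */
            int pages;              /* number of entries in page[] */
            int next_page;          /* next index to hand out */
            int length;             /* total output length in bytes */
    };

    static struct page_iter *page_iter_init(struct page **page, int pages,
                                            int length)
    {
            struct page_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

            if (!it)
                    return NULL;

            /* Default to whole pages when no explicit length is given. */
            it->length = length ? : pages * PAGE_SIZE;
            it->page = page;
            it->pages = pages;
            it->next_page = 0;
            return it;
    }

    /* Return the kernel address of the next page, or NULL when exhausted. */
    static void *page_iter_next(struct page_iter *it)
    {
            if (it->next_page == it->pages)
                    return NULL;
            return page_address(it->page[it->next_page++]);
    }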
D | cache.c
     206  for (j = 0; j < cache->pages; j++)    in squashfs_cache_delete()
     245  cache->pages = block_size >> PAGE_SHIFT;    in squashfs_cache_init()
     246  cache->pages = cache->pages ? cache->pages : 1;    in squashfs_cache_init()
     258  entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);    in squashfs_cache_init()
     264  for (j = 0; j < cache->pages; j++) {    in squashfs_cache_init()
     273  cache->pages, 0);    in squashfs_cache_init()
     408  int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;    in squashfs_read_table()  (local)
     417  data = kcalloc(pages, sizeof(void *), GFP_KERNEL);    in squashfs_read_table()
     423  actor = squashfs_page_actor_init(data, pages, length);    in squashfs_read_table()
     429  for (i = 0; i < pages; i++, buffer += PAGE_SIZE)    in squashfs_read_table()
/fs/erofs/
D | zpvec.h
      32  erofs_vtptr_t *pages;    (member)
      44  kunmap_atomic(ctor->pages);    in z_erofs_pagevec_ctor_exit()
      60  const erofs_vtptr_t t = ctor->pages[index];    in z_erofs_pagevec_ctor_next_page()
      80  ctor->pages = atomic ?    in z_erofs_pagevec_ctor_pagedown()
      89  erofs_vtptr_t *pages,    in z_erofs_pagevec_ctor_init()  (argument)
      94  ctor->pages = pages;    in z_erofs_pagevec_ctor_init()
     130  ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);    in z_erofs_pagevec_enqueue()
     145  t = ctor->pages[ctor->index];    in z_erofs_pagevec_dequeue()
     153  ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);    in z_erofs_pagevec_dequeue()
D | zdata.c
     171  struct page **pages = clt->compressedpages;    in preload_compressed_pages()  (local)
     172  pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);    in preload_compressed_pages()
     178  for (; pages < pcl->compressed_pages + clusterpages; ++pages) {    in preload_compressed_pages()
     183  if (READ_ONCE(*pages))    in preload_compressed_pages()
     194  clt->compressedpages = pages;    in preload_compressed_pages()
     199  if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))    in preload_compressed_pages()
     764  struct page **pages, **compressed_pages, *page;    in z_erofs_decompress_pcluster()  (local)
     779  pages = pages_onstack;    in z_erofs_decompress_pcluster()
     782  pages = z_pagemap_global;    in z_erofs_decompress_pcluster()
     789  pages = kvmalloc_array(nr_pages, sizeof(struct page *),    in z_erofs_decompress_pcluster()
    [all …]
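z_erofs_decompress_pcluster() above picks where its pages[] array lives based on the cluster size: a small on-stack array when it fits, kvmalloc_array() otherwise (its z_pagemap_global fallback is not shown here). A hedged sketch of just that storage choice; the 64-entry cap and the function name are made up for illustration.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int decompress_with_page_array(unsigned int nr_pages)
    {
            struct page *pages_onstack[64];
            struct page **pages = pages_onstack;
            int err = 0;

            if (nr_pages > ARRAY_SIZE(pages_onstack)) {
                    /* Large clusters: fall back to a heap (or vmalloc) array. */
                    pages = kvmalloc_array(nr_pages, sizeof(struct page *),
                                           GFP_KERNEL | __GFP_ZERO);
                    if (!pages)
                            return -ENOMEM;
            } else {
                    memset(pages, 0, nr_pages * sizeof(struct page *));
            }

            /* ... collect the output pages and run the decompressor here ... */

            if (pages != pages_onstack)
                    kvfree(pages);
            return err;
    }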
/fs/proc/
D | meminfo.c
      39  unsigned long pages[NR_LRU_LISTS];    in meminfo_proc_show()  (local)
      53  pages[lru] = global_node_page_state(NR_LRU_BASE + lru);    in meminfo_proc_show()
      65  show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] +    in meminfo_proc_show()
      66  pages[LRU_ACTIVE_FILE]);    in meminfo_proc_show()
      67  show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] +    in meminfo_proc_show()
      68  pages[LRU_INACTIVE_FILE]);    in meminfo_proc_show()
      69  show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]);    in meminfo_proc_show()
      70  show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]);    in meminfo_proc_show()
      71  show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]);    in meminfo_proc_show()
      72  show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]);    in meminfo_proc_show()
    [all …]
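The meminfo hits above boil down to: snapshot each LRU list's page count with global_node_page_state(), then print sums converted from pages to kilobytes. A minimal sketch of that pattern; show_lru_kb() and the direct seq_printf() calls stand in for fs/proc/meminfo.c's show_val_kb() helper.

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/seq_file.h>
    #include <linux/vmstat.h>

    static void show_lru_kb(struct seq_file *m)
    {
            unsigned long pages[NR_LRU_LISTS];
            int lru;

            /* Snapshot the global per-LRU page counts. */
            for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
                    pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

            /* Pages -> kB: one page is 2^(PAGE_SHIFT - 10) kilobytes. */
            seq_printf(m, "Active:   %8lu kB\n",
                       (pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE])
                            << (PAGE_SHIFT - 10));
            seq_printf(m, "Inactive: %8lu kB\n",
                       (pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE])
                            << (PAGE_SHIFT - 10));
    }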
/fs/ntfs/
D | compress.c
     502  struct page **pages;    in ntfs_read_compressed_block()  (local)
     515  pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);    in ntfs_read_compressed_block()
     522  if (unlikely(!pages || !bhs || !completed_pages)) {    in ntfs_read_compressed_block()
     524  kfree(pages);    in ntfs_read_compressed_block()
     537  pages[xpage] = page;    in ntfs_read_compressed_block()
     551  kfree(pages);    in ntfs_read_compressed_block()
     563  pages[i] = grab_cache_page_nowait(mapping, offset);    in ntfs_read_compressed_block()
     564  page = pages[i];    in ntfs_read_compressed_block()
     579  pages[i] = NULL;    in ntfs_read_compressed_block()
     738  page = pages[cur_page];    in ntfs_read_compressed_block()
    [all …]
D | file.c
     495  pgoff_t index, const unsigned nr_pages, struct page **pages,    in __ntfs_grab_cache_pages()  (argument)
     503  pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |    in __ntfs_grab_cache_pages()
     505  if (!pages[nr]) {    in __ntfs_grab_cache_pages()
     521  pages[nr] = *cached_page;    in __ntfs_grab_cache_pages()
     531  unlock_page(pages[--nr]);    in __ntfs_grab_cache_pages()
     532  put_page(pages[nr]);    in __ntfs_grab_cache_pages()
     570  static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,    in ntfs_prepare_pages_for_non_resident_write()  (argument)
     600  BUG_ON(!pages);    in ntfs_prepare_pages_for_non_resident_write()
     601  BUG_ON(!*pages);    in ntfs_prepare_pages_for_non_resident_write()
     602  vi = pages[0]->mapping->host;    in ntfs_prepare_pages_for_non_resident_write()
    [all …]
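__ntfs_grab_cache_pages() above has the grab-then-unwind shape: lock a run of consecutive page-cache pages, and if any lookup fails, unlock and release everything grabbed so far. A sketch under those assumptions; find_or_create_page() stands in for ntfs's FGP-flag lookup plus cached-page fallback, and the helper name is hypothetical.

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static int grab_cache_pages(struct address_space *mapping, pgoff_t index,
                                unsigned int nr, struct page **pages)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    /* Returns a locked, referenced page (or NULL). */
                    pages[i] = find_or_create_page(mapping, index + i,
                                                   mapping_gfp_mask(mapping));
                    if (!pages[i])
                            goto err_out;
            }
            return 0;

    err_out:
            /* Undo: unlock and drop every page grabbed before the failure. */
            while (i--) {
                    unlock_page(pages[i]);
                    put_page(pages[i]);
            }
            return -ENOMEM;
    }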
/fs/afs/
D | file.c
      26  struct list_head *pages, unsigned nr_pages);
     194  if (req->pages) {    in afs_put_read()
     196  if (req->pages[i])    in afs_put_read()
     197  put_page(req->pages[i]);    in afs_put_read()
     198  if (req->pages != req->array)    in afs_put_read()
     199  kfree(req->pages);    in afs_put_read()
     327  req->pages = req->array;    in afs_page_filler()
     328  req->pages[0] = page;    in afs_page_filler()
     419  struct page *page = req->pages[req->index];    in afs_readpages_page_done()
     421  req->pages[req->index] = NULL;    in afs_readpages_page_done()
    [all …]
D | write.c
      55  req->pages = req->array;    in afs_fill_page()
      56  req->pages[0] = page;    in afs_fill_page()
     250  pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);    in afs_kill_pages()
     254  struct page *page = pv.pages[loop];    in afs_kill_pages()
     293  pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);    in afs_redirty_pages()
     297  struct page *page = pv.pages[loop];    in afs_redirty_pages()
     333  first, count, pv.pages);    in afs_pages_written_back()
     337  priv = page_private(pv.pages[loop]);    in afs_pages_written_back()
     339  pv.pages[loop]->index, priv);    in afs_pages_written_back()
     340  set_page_private(pv.pages[loop], 0);    in afs_pages_written_back()
    [all …]
/fs/ceph/
D | addr.c
     279  struct page *page = osd_data->pages[i];    in finish_read()
     300  kfree(osd_data->pages);    in finish_read()
     319  struct page **pages;    in start_read()  (local)
     377  pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);    in start_read()
     378  if (!pages) {    in start_read()
     403  pages[i] = page;    in start_read()
     405  osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);    in start_read()
     424  ceph_fscache_readpage_cancel(inode, pages[i]);    in start_read()
     425  unlock_page(pages[i]);    in start_read()
     427  ceph_put_page_vector(pages, nr_pages, false);    in start_read()
    [all …]
D | cache.h
      30  struct list_head *pages,
      69  struct list_head *pages)    in ceph_fscache_readpages_cancel()  (argument)
      72  return fscache_readpages_cancel(ci->fscache, pages);    in ceph_fscache_readpages_cancel()
     122  struct page *pages)    in ceph_fscache_uncache_page()  (argument)
     134  struct list_head *pages,    in ceph_readpages_from_fscache()  (argument)
     165  struct list_head *pages)    in ceph_fscache_readpages_cancel()  (argument)
/fs/cramfs/
D | inode.c
     186  struct page *pages[BLKS_PER_BUF];    in cramfs_blkdev_read()  (local)
     224  pages[i] = page;    in cramfs_blkdev_read()
     228  struct page *page = pages[i];    in cramfs_blkdev_read()
     235  pages[i] = NULL;    in cramfs_blkdev_read()
     247  struct page *page = pages[i];    in cramfs_blkdev_read()
     297  static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)    in cramfs_get_block_range()  (argument)
     318  pgoff+i, pgoff + *pages - 1,    in cramfs_get_block_range()
     324  } while (++i < *pages);    in cramfs_get_block_range()
     326  *pages = i;    in cramfs_get_block_range()
     359  unsigned int pages, max_pages, offset;    in cramfs_physmem_mmap()  (local)
    [all …]
/fs/nfs/
D | nfs3acl.c
      50  struct page *pages[NFSACL_MAXPAGES] = { };    in nfs3_get_acl()  (local)
      54  .pages = pages,    in nfs3_get_acl()
     100  for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++)    in nfs3_get_acl()
     101  __free_page(args.pages[count]);    in nfs3_get_acl()
     163  struct page *pages[NFSACL_MAXPAGES];    in __nfs3_proc_setacls()  (local)
     168  .pages = pages,    in __nfs3_proc_setacls()
     202  args.pages[args.npages] = alloc_page(GFP_KERNEL);    in __nfs3_proc_setacls()
     203  if (args.pages[args.npages] == NULL)    in __nfs3_proc_setacls()
     239  __free_page(args.pages[args.npages]);    in __nfs3_proc_setacls()
D | pnfs_dev.c
     101  struct page **pages = NULL;    in nfs4_get_device_info()  (local)
     122  pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);    in nfs4_get_device_info()
     123  if (!pages)    in nfs4_get_device_info()
     127  pages[i] = alloc_page(gfp_flags);    in nfs4_get_device_info()
     128  if (!pages[i])    in nfs4_get_device_info()
     134  pdev->pages = pages;    in nfs4_get_device_info()
     156  __free_page(pages[i]);    in nfs4_get_device_info()
     157  kfree(pages);    in nfs4_get_device_info()
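nfs4_get_device_info() above shows the common shape for building an array of independently allocated pages: kcalloc() the pointer array, alloc_page() each slot, and unwind everything on failure. A sketch of just that allocation/cleanup pattern, with hypothetical helper names.

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static struct page **alloc_page_array(unsigned int npages, gfp_t gfp)
    {
            struct page **pages;
            unsigned int i;

            pages = kcalloc(npages, sizeof(struct page *), gfp);
            if (!pages)
                    return NULL;

            for (i = 0; i < npages; i++) {
                    pages[i] = alloc_page(gfp);
                    if (!pages[i])
                            goto out_free;
            }
            return pages;

    out_free:
            /* Free the pages allocated so far, then the array itself. */
            while (i--)
                    __free_page(pages[i]);
            kfree(pages);
            return NULL;
    }

    static void free_page_array(struct page **pages, unsigned int npages)
    {
            unsigned int i;

            for (i = 0; i < npages; i++)
                    __free_page(pages[i]);
            kfree(pages);
    }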
D | pnfs_nfs.c
      34  struct nfs_page *first = nfs_list_entry(data->pages.next);    in pnfs_generic_prepare_to_resend_writes()
     166  LIST_HEAD(pages);    in pnfs_generic_retry_commit()
     178  list_splice_init(&bucket->committing, &pages);    in pnfs_generic_retry_commit()
     180  nfs_retry_commit(&pages, freeme, cinfo, i);    in pnfs_generic_retry_commit()
     206  list_add(&data->pages, list);    in pnfs_generic_alloc_ds_commits()
     216  void pnfs_fetch_commit_bucket_list(struct list_head *pages,    in pnfs_fetch_commit_bucket_list()  (argument)
     227  list_splice_init(&bucket->committing, pages);    in pnfs_fetch_commit_bucket_list()
     241  pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,    in pnfs_generic_commit_cancel_empty_pagelist()  (argument)
     245  if (list_empty(pages)) {    in pnfs_generic_commit_cancel_empty_pagelist()
     273  list_add(&data->pages, &list);    in pnfs_generic_commit_pagelist()
    [all …]
/fs/orangefs/
D | inode.c
      90  struct page **pages;    (member)
      97  struct inode *inode = ow->pages[0]->mapping->host;    in orangefs_writepages_work()
     108  set_page_writeback(ow->pages[i]);    in orangefs_writepages_work()
     109  ow->bv[i].bv_page = ow->pages[i];    in orangefs_writepages_work()
     110  ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,    in orangefs_writepages_work()
     112  max(ow->off, page_offset(ow->pages[i]));    in orangefs_writepages_work()
     115  page_offset(ow->pages[i]);    in orangefs_writepages_work()
     132  SetPageError(ow->pages[i]);    in orangefs_writepages_work()
     133  mapping_set_error(ow->pages[i]->mapping, ret);    in orangefs_writepages_work()
     134  if (PagePrivate(ow->pages[i])) {    in orangefs_writepages_work()
    [all …]
/fs/cifs/
D | fscache.h
      94  struct list_head *pages,    in cifs_readpages_from_fscache()  (argument)
      98  return __cifs_readpages_from_fscache(inode, mapping, pages,    in cifs_readpages_from_fscache()
     111  struct list_head *pages)    in cifs_fscache_readpages_cancel()  (argument)
     114  return __cifs_fscache_readpages_cancel(inode, pages);    in cifs_fscache_readpages_cancel()
     148  struct list_head *pages,    in cifs_readpages_from_fscache()  (argument)
     158  struct list_head *pages)    in cifs_fscache_readpages_cancel()  (argument)
/fs/fuse/
D | file.c
      25  struct page **pages;    in fuse_pages_alloc()  (local)
      27  pages = kzalloc(npages * (sizeof(struct page *) +    in fuse_pages_alloc()
      29  *desc = (void *) (pages + npages);    in fuse_pages_alloc()
      31  return pages;    in fuse_pages_alloc()
     578  set_page_dirty_lock(ap->pages[i]);    in fuse_release_user_pages()
     579  put_page(ap->pages[i]);    in fuse_release_user_pages()
     657  ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,    in fuse_io_alloc()
     659  if (!ia->ap.pages) {    in fuse_io_alloc()
     669  kfree(ia->ap.pages);    in fuse_io_free()
     772  zero_user_segment(ap->pages[i], off, PAGE_SIZE);    in fuse_short_read()
    [all …]
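fuse_pages_alloc() above uses a single allocation for two parallel arrays: the page-pointer array and a per-page descriptor array, with *desc pointed at the tail of the same buffer so one kfree() releases both. A sketch of that trick; struct page_desc and the helper name are stand-ins for fuse's struct fuse_page_desc and fuse_pages_alloc().

    #include <linux/slab.h>

    struct page_desc {
            unsigned int length;
            unsigned int offset;
    };

    static struct page **pages_alloc(unsigned int npages, gfp_t gfp,
                                     struct page_desc **desc)
    {
            struct page **pages;

            /* One buffer: npages pointers followed by npages descriptors. */
            pages = kzalloc(npages * (sizeof(struct page *) +
                                      sizeof(struct page_desc)), gfp);
            if (pages)
                    *desc = (struct page_desc *)(pages + npages);
            return pages;
    }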
/fs/9p/
D | cache.h
      36  struct list_head *pages,
      61  struct list_head *pages,    in v9fs_readpages_from_fscache()  (argument)
      64  return __v9fs_readpages_from_fscache(inode, mapping, pages,    in v9fs_readpages_from_fscache()
     117  struct list_head *pages,    in v9fs_readpages_from_fscache()  (argument)
/fs/btrfs/
D | extent_io.c
    1871  struct page *pages[16];    in __process_pages_contig()  (local)
    1887  nr_pages, ARRAY_SIZE(pages)), pages);    in __process_pages_contig()
    1900  SetPagePrivate2(pages[i]);    in __process_pages_contig()
    1902  if (locked_page && pages[i] == locked_page) {    in __process_pages_contig()
    1903  put_page(pages[i]);    in __process_pages_contig()
    1908  clear_page_dirty_for_io(pages[i]);    in __process_pages_contig()
    1910  set_page_writeback(pages[i]);    in __process_pages_contig()
    1912  SetPageError(pages[i]);    in __process_pages_contig()
    1914  end_page_writeback(pages[i]);    in __process_pages_contig()
    1916  unlock_page(pages[i]);    in __process_pages_contig()
    [all …]
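__process_pages_contig() above walks a file range in batches: a fixed pages[16] array, find_get_pages_contig() to fill it, per-page work, then put_page() to drop the reference the lookup took. A sketch of that batching loop under the same assumptions; the visit callback and helper name are placeholders, not btrfs's flag-driven logic.

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static void walk_contig_range(struct address_space *mapping, pgoff_t start,
                                  pgoff_t end, void (*visit)(struct page *))
    {
            struct page *pages[16];
            unsigned long nr_pages = end - start + 1;

            while (nr_pages > 0) {
                    unsigned int want = min_t(unsigned long, nr_pages,
                                              ARRAY_SIZE(pages));
                    unsigned int found, i;

                    found = find_get_pages_contig(mapping, start, want, pages);
                    if (!found)
                            break;

                    for (i = 0; i < found; i++) {
                            visit(pages[i]);        /* e.g. unlock_page() */
                            put_page(pages[i]);     /* drop the lookup reference */
                    }
                    start += found;
                    nr_pages -= found;
            }
    }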
/fs/btrfs/tests/
D | extent-io-tests.c
      23  struct page *pages[16];    in process_page_range()  (local)
      34  ARRAY_SIZE(pages)), pages);    in process_page_range()
      37  !PageLocked(pages[i]))    in process_page_range()
      39  if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))    in process_page_range()
      40  unlock_page(pages[i]);    in process_page_range()
      41  put_page(pages[i]);    in process_page_range()
      43  put_page(pages[i]);    in process_page_range()