/drivers/staging/lustre/lustre/llite/
D | vvp_page.c |
    57  struct page *vmpage = cp->cpg_page;    in vvp_page_fini_common() local
    59  LASSERT(vmpage != NULL);    in vvp_page_fini_common()
    60  page_cache_release(vmpage);    in vvp_page_fini_common()
    67  struct page *vmpage = cp->cpg_page;    in vvp_page_fini() local
    73  LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);    in vvp_page_fini()
    82  struct page *vmpage = vpg->cpg_page;    in vvp_page_own() local
    84  LASSERT(vmpage != NULL);    in vvp_page_own()
    86  if (!trylock_page(vmpage))    in vvp_page_own()
    89  if (unlikely(PageWriteback(vmpage))) {    in vvp_page_own()
    90  unlock_page(vmpage);    in vvp_page_own()
    [all …]
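The vvp_page.c hits outline how the VVP layer owns and tears down the VM page behind a cl_page: ownership is taken by trylocking the page and refusing pages under writeback, and the pin taken at init time is dropped in the fini path. Below is a minimal sketch of that pattern; my_page_slice, my_page_own and my_page_fini are illustrative names, not the Lustre symbols, and it assumes a pre-4.6 kernel where page_cache_release() (today's put_page()) still exists.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

struct my_page_slice {
        struct page *cpg_page;          /* backing VM page, pinned at init time */
};

static int my_page_own(struct my_page_slice *cp)
{
        struct page *vmpage = cp->cpg_page;

        if (!trylock_page(vmpage))
                return -EAGAIN;         /* caller retries or blocks */

        if (unlikely(PageWriteback(vmpage))) {
                unlock_page(vmpage);    /* never own a page under writeback */
                return -EAGAIN;
        }
        return 0;
}

static void my_page_fini(struct my_page_slice *cp)
{
        /* drop the page-cache pin taken when the slice was set up */
        page_cache_release(cp->cpg_page);
        cp->cpg_page = NULL;
}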
D | rw26.c |
    72  static void ll_invalidatepage(struct page *vmpage, unsigned int offset,    in ll_invalidatepage() argument
    82  LASSERT(PageLocked(vmpage));    in ll_invalidatepage()
    83  LASSERT(!PageWriteback(vmpage));    in ll_invalidatepage()
    93  inode = vmpage->mapping->host;    in ll_invalidatepage()
    96  page = cl_vmpage_page(vmpage, obj);    in ll_invalidatepage()
    99  "delete", vmpage);    in ll_invalidatepage()
   102  "delete", vmpage);    in ll_invalidatepage()
   106  LASSERT(vmpage->private == 0);    in ll_invalidatepage()
   117  static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)    in ll_releasepage() argument
   126  LASSERT(PageLocked(vmpage));    in ll_releasepage()
    [all …]
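The ll_invalidatepage() fragments show the usual shape of an ->invalidatepage hook: the VM calls it with the page locked and not under writeback, and the filesystem reaches its own per-page object through the page's ->private pointer and ->mapping->host inode (cl_vmpage_page() in Lustre's case) before deleting it. A hedged sketch of that shape; struct my_fs_page and my_fs_page_delete() are placeholders, not real Lustre or kernel symbols.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

struct my_fs_page;                      /* stand-in for struct cl_page */

static void my_fs_page_delete(struct inode *inode, struct my_fs_page *fsp)
{
        /* hypothetical: detach and free the per-page descriptor */
}

static void my_invalidatepage(struct page *vmpage, unsigned int offset,
                              unsigned int length)
{
        struct inode *inode;
        struct my_fs_page *fsp;

        BUG_ON(!PageLocked(vmpage));    /* the VM guarantees both of these */
        BUG_ON(PageWriteback(vmpage));

        inode = vmpage->mapping->host;  /* owning inode, as at rw26.c:93 */
        fsp = (struct my_fs_page *)vmpage->private;
        if (fsp)
                my_fs_page_delete(inode, fsp);
}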
D | llite_mmap.c |
   169  static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,    in ll_page_mkwrite0() argument
   181  LASSERT(vmpage != NULL);    in ll_page_mkwrite0()
   183  io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);    in ll_page_mkwrite0()
   198  vio->u.fault.ft_vmpage = vmpage;    in ll_page_mkwrite0()
   219  lock_page(vmpage);    in ll_page_mkwrite0()
   220  if (vmpage->mapping == NULL) {    in ll_page_mkwrite0()
   221  unlock_page(vmpage);    in ll_page_mkwrite0()
   228  } else if (!PageDirty(vmpage)) {    in ll_page_mkwrite0()
   235  unlock_page(vmpage);    in ll_page_mkwrite0()
   238  vmpage, vmpage->index);    in ll_page_mkwrite0()
    [all …]
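The ll_page_mkwrite0() fragments follow the standard page_mkwrite dance: after the write IO is set up, the page is re-locked and re-validated, because a truncate may have detached it from its mapping in the meantime, and the still-attached page is then put on the dirty path. A rough sketch of the re-validation step only, under simplified assumptions; the plain set_page_dirty() here stands in for the cl_io resubmission the real function performs, and the error code is illustrative.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int my_mkwrite_revalidate(struct page *vmpage)
{
        lock_page(vmpage);

        if (vmpage->mapping == NULL) {
                /* lost the race against truncate; caller must retry the fault */
                unlock_page(vmpage);
                return -ENODATA;
        }

        if (!PageDirty(vmpage))
                set_page_dirty(vmpage); /* queue the page for writeback */

        unlock_page(vmpage);
        return 0;
}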
D | rw.c |
    88  struct page *vmpage, int create)    in ll_cl_init() argument
    99  clob = ll_i2info(vmpage->mapping->host)->lli_clob;    in ll_cl_init()
   115  struct inode *inode = vmpage->mapping->host;    in ll_cl_init()
   148  pos = vmpage->index << PAGE_CACHE_SHIFT;    in ll_cl_init()
   174  page = cl_page_find(env, clob, vmpage->index, vmpage,    in ll_cl_init()
   189  vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,    in ll_cl_init()
   215  int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,    in ll_prepare_write() argument
   221  lcc = ll_cl_init(file, vmpage, 1);    in ll_prepare_write()
   250  int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,    in ll_commit_write() argument
   425  struct page *vmpage)    in cl_read_ahead_page() argument
    [all …]
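Two things recur in the rw.c hits: the byte position a page-cache page covers is its index shifted by PAGE_CACHE_SHIFT (identical to PAGE_SHIFT on that tree and later removed), and the cl_page for a vmpage is obtained with cl_page_find(), which either returns the existing attachment or creates one. A small sketch of the offset arithmetic, with the cast that keeps it safe for large files on 32-bit:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* byte range of the file covered by a page-cache page, as implied by rw.c:148 */
static void my_page_file_range(struct page *vmpage, loff_t *start, loff_t *end)
{
        *start = (loff_t)vmpage->index << PAGE_SHIFT;
        *end   = *start + PAGE_SIZE - 1;
}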
D | vvp_dev.c |
   402  struct page *vmpage;    in vvp_pgcache_page_show() local
   406  vmpage = cpg->cpg_page;    in vvp_pgcache_page_show()
   413  PageWriteback(vmpage) ? "wb" : "-",    in vvp_pgcache_page_show()
   414  vmpage, vmpage->mapping->host->i_ino,    in vvp_pgcache_page_show()
   415  vmpage->mapping->host->i_generation,    in vvp_pgcache_page_show()
   416  vmpage->mapping->host, vmpage->index,    in vvp_pgcache_page_show()
   417  page_count(vmpage));    in vvp_pgcache_page_show()
   419  seq_page_flag(seq, vmpage, locked, has_flags);    in vvp_pgcache_page_show()
   420  seq_page_flag(seq, vmpage, error, has_flags);    in vvp_pgcache_page_show()
   421  seq_page_flag(seq, vmpage, referenced, has_flags);    in vvp_pgcache_page_show()
    [all …]
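vvp_pgcache_page_show() is a seq_file dump of each cached page's state: inode number and generation, page index, reference count, and a set of page flags emitted through the seq_page_flag() helper macro. A tiny sketch of the same idea using plain seq_printf(); the format string is illustrative, not Lustre's actual output.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

static void my_pgcache_page_show(struct seq_file *seq, struct page *vmpage)
{
        seq_printf(seq, "ino %lu idx %lu cnt %d%s%s%s\n",
                   vmpage->mapping->host->i_ino,
                   (unsigned long)vmpage->index,
                   page_count(vmpage),
                   PageLocked(vmpage)    ? " locked"    : "",
                   PageDirty(vmpage)     ? " dirty"     : "",
                   PageWriteback(vmpage) ? " writeback" : "");
}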
D | vvp_io.c |
   656  struct page *vmpage = NULL;    in vvp_io_fault_start() local
   684  vmpage = cfio->ft_vmpage;    in vvp_io_fault_start()
   685  LASSERT(PageLocked(vmpage));    in vvp_io_fault_start()
   688  ll_invalidate_page(vmpage);    in vvp_io_fault_start()
   693  if (unlikely((vmpage->mapping != inode->i_mapping) ||    in vvp_io_fault_start()
   694  (page_offset(vmpage) > size))) {    in vvp_io_fault_start()
   715  vmpage->mapping, fio->ft_index, last_index);    in vvp_io_fault_start()
   732  page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);    in vvp_io_fault_start()
   741  wait_on_page_writeback(vmpage);    in vvp_io_fault_start()
   742  if (set_page_dirty(vmpage)) {    in vvp_io_fault_start()
    [all …]
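The vvp_io_fault_start() lines show the checks a faulted-in page gets before it is handed to the cl_page machinery: it must still belong to the inode's mapping and lie within i_size, any writeback in flight is waited out, and a write fault marks it dirty. A condensed sketch of those checks; the error code and helper name are illustrative, and the real code retries the fault rather than simply failing.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int my_fault_check(struct inode *inode, struct page *vmpage)
{
        loff_t size = i_size_read(inode);

        /* the page may have been truncated or remapped while we slept */
        if (unlikely(vmpage->mapping != inode->i_mapping ||
                     page_offset(vmpage) > size))
                return -EFAULT;

        wait_on_page_writeback(vmpage); /* don't redirty a page being written */

        /* set_page_dirty() returns nonzero only for the first dirtier */
        if (!set_page_dirty(vmpage))
                return 0;               /* already dirty: nothing to account */

        /* the real code accounts/queues the newly dirtied page here */
        return 0;
}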
D | vvp_internal.h |
    52  struct cl_page *page, struct page *vmpage);
D | llite_internal.h |
   965  static inline void ll_invalidate_page(struct page *vmpage)    in ll_invalidate_page() argument
   967  struct address_space *mapping = vmpage->mapping;    in ll_invalidate_page()
   968  loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;    in ll_invalidate_page()
   970  LASSERT(PageLocked(vmpage));    in ll_invalidate_page()
   975  truncate_complete_page(mapping, vmpage);    in ll_invalidate_page()
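Read together, these fragments are most of the ll_invalidate_page() inline helper. A reconstruction for readability; the elided lines (the NULL-mapping early return and the ll_teardown_mmaps() call over the page's byte range) are filled in from context and are assumptions, not a verbatim copy of the tree.

static inline void ll_invalidate_page(struct page *vmpage)
{
        struct address_space *mapping = vmpage->mapping;
        loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;

        LASSERT(PageLocked(vmpage));
        if (mapping == NULL)
                return;

        /* assumed: unmap any user mappings of this byte range first */
        ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
        truncate_complete_page(mapping, vmpage);
}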
/drivers/staging/lustre/lustre/lov/
D | lov_page.c |
   158  struct cl_page *page, struct page *vmpage)    in lov_page_init_raid0() argument
   190  cl_index(subobj, suboff), vmpage, page);    in lov_page_init_raid0()
   217  struct cl_page *page, struct page *vmpage)    in lov_page_init_empty() argument
   223  addr = kmap(vmpage);    in lov_page_init_empty()
   225  kunmap(vmpage);    in lov_page_init_empty()
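lov_page_init_empty() maps the page and fills its contents (a page with no backing stripe reads back as zeroes), which is the standard kmap()/kunmap() pattern for touching page data from process context. A minimal sketch, assuming a memset-to-zero between the two calls:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void my_zero_page_contents(struct page *vmpage)
{
        void *addr = kmap(vmpage);      /* temporary kernel mapping (may sleep) */

        memset(addr, 0, PAGE_SIZE);     /* hole pages read back as zeroes */
        kunmap(vmpage);
}

clear_highpage(vmpage) collapses the same three steps into one call.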
D | lov_cl_internal.h |
   614  struct cl_page *page, struct page *vmpage);
   616  struct cl_page *page, struct page *vmpage);
   620  struct cl_page *page, struct page *vmpage);
   623  struct cl_page *page, struct page *vmpage);
D | lov_object.c |
    70  struct cl_page *page, struct page *vmpage);
   821  struct cl_page *page, struct page *vmpage)    in lov_page_init() argument
   824  llo_page_init, env, obj, page, vmpage);    in lov_page_init()
/drivers/staging/lustre/lustre/obdclass/
D | cl_page.c |
   280  struct cl_object *o, pgoff_t ind, struct page *vmpage,    in cl_page_alloc() argument
   310  page, vmpage);    in cl_page_alloc()
   338  pgoff_t idx, struct page *vmpage,    in cl_page_find0() argument
   353  idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);    in cl_page_find0()
   360  KLASSERT(PageLocked(vmpage));    in cl_page_find0()
   370  page = cl_vmpage_page(vmpage, o);    in cl_page_find0()
   373  cl_page_vmpage(env, page) == vmpage &&    in cl_page_find0()
   382  page = cl_page_alloc(env, o, idx, vmpage, type);    in cl_page_find0()
   437  pgoff_t idx, struct page *vmpage,    in cl_page_find() argument
   440  return cl_page_find0(env, o, idx, vmpage, type, NULL);    in cl_page_find()
    [all …]
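cl_page_find0() is a classic find-or-create: with the VM page locked, the attached descriptor is looked up first (cl_vmpage_page(), which reads it back through the page's ->private), and only on a miss is a fresh cl_page allocated and attached (cl_page_alloc()). A schematic of that flow with placeholder types; struct my_cl_page, my_lookup() and my_alloc() are illustrative stand-ins, not the Lustre symbols.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_cl_page { pgoff_t index; };  /* stand-in for struct cl_page */

/* hypothetical lookup: the attached descriptor lives in vmpage->private */
static struct my_cl_page *my_lookup(struct page *vmpage)
{
        return (struct my_cl_page *)vmpage->private;
}

/* hypothetical allocation: build a descriptor and attach it to the page */
static struct my_cl_page *my_alloc(struct page *vmpage, pgoff_t idx)
{
        struct my_cl_page *page = kzalloc(sizeof(*page), GFP_KERNEL);

        if (page) {
                page->index = idx;
                set_page_private(vmpage, (unsigned long)page);
        }
        return page;
}

static struct my_cl_page *my_find_or_create(struct page *vmpage, pgoff_t idx)
{
        struct my_cl_page *page;

        BUG_ON(!PageLocked(vmpage));    /* ->private is only stable while locked */

        page = my_lookup(vmpage);
        if (page)
                return page;            /* fast path: already attached */

        return my_alloc(vmpage, idx);   /* slow path, mirroring cl_page_alloc() */
}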
/drivers/staging/lustre/lustre/include/
D | cl_object.h |
   323  struct cl_page *page, struct page *vmpage);
  2793  pgoff_t idx, struct page *vmpage,
  2797  pgoff_t idx, struct page *vmpage,
  2810  struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
/drivers/staging/lustre/lustre/lclient/
D | lcommon_cl.c |
   993  struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)    in ccc_vmpage_page_transient() argument
   995  KLASSERT(PageLocked(vmpage));    in ccc_vmpage_page_transient()
   996  return (struct cl_page *)vmpage->private;    in ccc_vmpage_page_transient()
/drivers/staging/lustre/lustre/obdecho/
D | echo_client.c |
   282  struct page *vmpage = ep->ep_vmpage;    in echo_page_fini() local
   285  page_cache_release(vmpage);    in echo_page_fini()
   374  struct cl_page *page, struct page *vmpage)    in echo_page_init() argument
   379  ep->ep_vmpage = vmpage;    in echo_page_init()
   380  page_cache_get(vmpage);    in echo_page_init()
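echo_page_init()/echo_page_fini() show the reference pairing every layer attaching to a vmpage follows: take a page-cache reference when the per-page object is set up, drop it when the object is destroyed. A short sketch of the pairing with an illustrative struct, again assuming the pre-4.6 page_cache_get()/page_cache_release() names (get_page()/put_page() today):

#include <linux/mm.h>
#include <linux/pagemap.h>

struct my_echo_page { struct page *ep_vmpage; };

static void my_echo_page_init(struct my_echo_page *ep, struct page *vmpage)
{
        ep->ep_vmpage = vmpage;
        page_cache_get(vmpage);         /* pin for the lifetime of the object */
}

static void my_echo_page_fini(struct my_echo_page *ep)
{
        page_cache_release(ep->ep_vmpage);      /* drop the init-time pin */
        ep->ep_vmpage = NULL;
}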
/drivers/staging/lustre/lustre/osc/
D | osc_io.c |
   362  struct page *vmpage = cl_page_vmpage(env, page);    in trunc_check_cb() local
   364  if (PageLocked(vmpage))    in trunc_check_cb()
D | osc_page.c |
   502  struct cl_page *page, struct page *vmpage)    in osc_page_init() argument
   511  result = osc_prep_async_page(osc, opg, vmpage,    in osc_page_init()
D | osc_cl_internal.h |
   425  struct cl_page *page, struct page *vmpage);