
Searched +full:page +full:- +full:offset (Results 1 – 25 of 1086) sorted by relevance


/kernel/linux/linux-6.6/include/linux/
highmem.h
1 /* SPDX-License-Identifier: GPL-2.0 */
14 #include "highmem-internal.h"
17 * kmap - Map a page for long term usage
18 * @page: Pointer to the page to be mapped
37 static inline void *kmap(struct page *page);
40 * kunmap - Unmap the virtual address mapped by kmap()
41 * @page: Pointer to the page which was mapped by kmap()
46 static inline void kunmap(struct page *page);
49 * kmap_to_page - Get the page for a kmap'ed address
52 * Returns: The page which is mapped to @addr.
[all …]
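
Note: the highmem.h hits above document kmap()/kunmap(), the long-term mapping API for (possibly highmem) pages. A minimal usage sketch follows; the function name touch_one_page is made up here, and for short-lived mappings kmap_local_page()/kunmap_local() would normally be preferred.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Sketch only: map a freshly allocated page, touch it through the kernel
 * mapping, then unmap and free it. */
static int touch_one_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *vaddr;

	if (!page)
		return -ENOMEM;

	vaddr = kmap(page);          /* long-term mapping; may sleep */
	memset(vaddr, 0, PAGE_SIZE); /* the page is now addressable via vaddr */
	kunmap(page);                /* must be passed the same struct page */

	__free_page(page);
	return 0;
}
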
scatterlist.h
1 /* SPDX-License-Identifier: GPL-2.0 */
13 unsigned int offset; member
31 #define sg_dma_address(sg) ((sg)->dma_address)
34 #define sg_dma_len(sg) ((sg)->dma_length)
36 #define sg_dma_len(sg) ((sg)->length)
55 * the page pointer AND encode information about the sg table as well. The two
71 * We overload the LSB of the page pointer to indicate whether it's
73 * Those low bits are there for everyone! (thanks mason :-)
79 return sg->page_link & SG_PAGE_LINK_MASK; in __sg_flags()
84 return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK); in sg_chain_ptr()
[all …]
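
Note: scatterlist.h above stores a page pointer, offset and length per entry and overloads the low bits of page_link for chain/end markers. A minimal sketch of filling one entry through the accessors rather than by touching page_link directly; fill_one_sg_entry is a made-up name.

#include <linux/scatterlist.h>

/* Sketch only: describe 'len' bytes starting at 'offset' inside 'page'. */
static void fill_one_sg_entry(struct scatterlist *sg, struct page *page,
			      unsigned int len, unsigned int offset)
{
	sg_init_table(sg, 1);               /* clear the entry, mark it as the end */
	sg_set_page(sg, page, len, offset); /* record page, length and offset */
	/* After dma_map_sg(), the device-visible values are read back with
	 * sg_dma_address(sg) and sg_dma_len(sg). */
}
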
/kernel/linux/linux-5.10/fs/iomap/
seek.c
1 // SPDX-License-Identifier: GPL-2.0
14 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
15 * Returns true if found and updates @lastoff to the offset in file.
18 page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff, in page_seek_hole_data() argument
21 const struct address_space_operations *ops = inode->i_mapping->a_ops; in page_seek_hole_data()
24 loff_t poff = page_offset(page); in page_seek_hole_data()
31 * Last offset smaller than the start of the page means we found in page_seek_hole_data()
40 * Just check the page unless we can and should check block ranges: in page_seek_hole_data()
42 if (bsize == PAGE_SIZE || !ops->is_partially_uptodate) in page_seek_hole_data()
43 return PageUptodate(page) == seek_data; in page_seek_hole_data()
[all …]
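
Note: fs/iomap/seek.c above implements the in-kernel page scan behind SEEK_HOLE/SEEK_DATA, starting each comparison from page_offset(page). The same machinery is reached from user space through lseek(); a minimal sketch, assuming an already-open sparse file descriptor fd:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch only: print the data extents of a sparse file. */
static void dump_extents(int fd)
{
	off_t end = lseek(fd, 0, SEEK_END);
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0 && data < end) {
		hole = lseek(fd, data, SEEK_HOLE); /* first hole after this data */
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		data = hole;
	}
}
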
/kernel/linux/linux-5.10/fs/squashfs/
file.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Squashfs - a compressed read only filesystem for Linux
14 * compressed fragment block (tail-end packed block). The compressed size
23 * retaining a simple and space-efficient block list on disk. The cache
44 * Locate cache slot in range [offset, index] for specified inode. If
47 static struct meta_index *locate_meta_index(struct inode *inode, int offset, in locate_meta_index() argument
51 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; in locate_meta_index()
54 mutex_lock(&msblk->meta_index_mutex); in locate_meta_index()
56 TRACE("locate_meta_index: index %d, offset %d\n", index, offset); in locate_meta_index()
58 if (msblk->meta_index == NULL) in locate_meta_index()
[all …]
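
Note: locate_meta_index() above searches, under meta_index_mutex, for a cached slot whose [offset, index] range covers the requested position for this inode. A simplified sketch of that lookup pattern on a hypothetical slot array (every name below is made up, not squashfs's actual structure):

#include <linux/mutex.h>

struct cache_slot {
	unsigned long owner;  /* e.g. an inode number */
	int start, end;       /* range this slot covers */
};

/* Sketch only: find a slot covering 'want' for 'owner', or return NULL. */
static struct cache_slot *find_slot(struct cache_slot *slots, int nr_slots,
				    struct mutex *lock, unsigned long owner,
				    int want)
{
	struct cache_slot *found = NULL;
	int i;

	mutex_lock(lock);
	for (i = 0; i < nr_slots; i++) {
		if (slots[i].owner == owner &&
		    slots[i].start <= want && want <= slots[i].end) {
			found = &slots[i];
			break;
		}
	}
	mutex_unlock(lock);
	return found;
}
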
/kernel/linux/linux-6.6/sound/pci/emu10k1/
memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \ argument
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \ argument
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30 /* get aligned page from offset address */
31 #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) argument
[all …]
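
Note: get_aligned_page() above is the standard byte-offset to page-index shift. A minimal sketch of the complementary conversions; the helper names are made up (the kernel's own offset_in_page() macro covers the second one):

#include <linux/mm.h>

/* Sketch only: split a byte offset into a page index and an in-page offset. */
static inline unsigned long offset_to_page_index(loff_t offset)
{
	return offset >> PAGE_SHIFT;        /* which page */
}

static inline unsigned int offset_in_page_bytes(loff_t offset)
{
	return offset & (PAGE_SIZE - 1);    /* where inside that page */
}

static inline loff_t page_index_to_offset(unsigned long index)
{
	return (loff_t)index << PAGE_SHIFT; /* back to a byte offset */
}
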
/kernel/linux/linux-6.6/fs/squashfs/
file.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Squashfs - a compressed read only filesystem for Linux
14 * compressed fragment block (tail-end packed block). The compressed size
23 * retaining a simple and space-efficient block list on disk. The cache
45 * Locate cache slot in range [offset, index] for specified inode. If
48 static struct meta_index *locate_meta_index(struct inode *inode, int offset, in locate_meta_index() argument
52 struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; in locate_meta_index()
55 mutex_lock(&msblk->meta_index_mutex); in locate_meta_index()
57 TRACE("locate_meta_index: index %d, offset %d\n", index, offset); in locate_meta_index()
59 if (msblk->meta_index == NULL) in locate_meta_index()
[all …]
block.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Squashfs - a compressed read only filesystem for Linux
12 * This file implements the low-level routines to read and decompress
31 * Returns the amount of bytes copied to the page actor.
35 int offset, int req_length) in copy_bio_to_actor() argument
50 int bytes_to_copy = min_t(int, bvec->bv_len - offset, in copy_bio_to_actor()
51 PAGE_SIZE - actor_offset); in copy_bio_to_actor()
54 req_length - copied_bytes); in copy_bio_to_actor()
57 offset, bytes_to_copy); in copy_bio_to_actor()
61 offset += bytes_to_copy; in copy_bio_to_actor()
[all …]
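
Note: copy_bio_to_actor() above copies decompressed data in chunks, clamping each memcpy() to the smallest of: bytes left in the current bio segment, bytes left in the current destination page, and bytes left in the request. A simplified, self-contained sketch of that clamping over plain buffers (names made up, source treated as contiguous):

#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

/* Sketch only: copy up to req_length bytes from src+offset into dst,
 * one destination-page-sized chunk at a time. */
static size_t copy_clamped(char *dst, const char *src, size_t src_len,
			   size_t offset, size_t req_length)
{
	size_t copied = 0;

	while (copied < req_length && offset < src_len) {
		size_t dst_off = copied & (SKETCH_PAGE_SIZE - 1);
		size_t chunk = src_len - offset;            /* left in source */

		if (chunk > SKETCH_PAGE_SIZE - dst_off)     /* left in dest page */
			chunk = SKETCH_PAGE_SIZE - dst_off;
		if (chunk > req_length - copied)            /* left in request */
			chunk = req_length - copied;

		memcpy(dst + copied, src + offset, chunk);
		offset += chunk;
		copied += chunk;
	}
	return copied;
}
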
/kernel/linux/linux-5.10/fs/jfs/
jfs_metapage.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2005
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
25 uint pagealloc; /* # of page allocations */
26 uint pagefree; /* # of page frees */
31 #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
32 #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
36 clear_bit_unlock(META_locked, &mp->flag); in unlock_metapage()
37 wake_up(&mp->wait); in unlock_metapage()
44 add_wait_queue_exclusive(&mp->wait, &wait); in __lock_metapage()
[all …]
/kernel/linux/linux-6.6/fs/jfs/
jfs_metapage.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2005
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
26 uint pagealloc; /* # of page allocations */
27 uint pagefree; /* # of page frees */
32 #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
33 #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
37 clear_bit_unlock(META_locked, &mp->flag); in unlock_metapage()
38 wake_up(&mp->wait); in unlock_metapage()
45 add_wait_queue_exclusive(&mp->wait, &wait); in __lock_metapage()
[all …]
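
Note: the metapage code above builds a one-bit lock: trylock_metapage() takes META_locked with acquire semantics, and unlock_metapage() releases it and wakes waiters on mp->wait. A minimal sketch of the same pattern on a made-up flags word and waitqueue:

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_LOCKED 0 /* bit number in the flags word; illustrative only */

static unsigned long my_flags;
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

static void my_lock(void)
{
	/* Retry the atomic acquire each time we are woken; test_and_set_bit_lock()
	 * pairs with clear_bit_unlock() below for acquire/release ordering. */
	wait_event(my_waitq, !test_and_set_bit_lock(MY_LOCKED, &my_flags));
}

static void my_unlock(void)
{
	clear_bit_unlock(MY_LOCKED, &my_flags);
	wake_up(&my_waitq);
}
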
/kernel/linux/linux-5.10/sound/pci/emu10k1/
memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * EMU10K1 memory page allocation (PTB area)
18 /* page arguments of these two macros are Emu page (4096 bytes), not like
21 #define __set_ptb_entry(emu,page,addr) \ argument
22 (((__le32 *)(emu)->ptb_pages.area)[page] = \
23 cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24 #define __get_ptb_entry(emu, page) \ argument
25 (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30 /* get aligned page from offset address */
31 #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) argument
[all …]
/kernel/linux/linux-5.10/fs/hfsplus/
bitmap.c
1 // SPDX-License-Identifier: GPL-2.0
20 u32 offset, u32 *max) in hfsplus_block_allocate() argument
23 struct page *page; in hfsplus_block_allocate() local
34 hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); in hfsplus_block_allocate()
35 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate()
36 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate()
37 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate()
38 if (IS_ERR(page)) { in hfsplus_block_allocate()
42 pptr = kmap(page); in hfsplus_block_allocate()
43 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; in hfsplus_block_allocate()
[all …]
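
Note: hfsplus_block_allocate() above maps a bitmap bit number to a page-cache page (offset / PAGE_CACHE_BITS) and then to a 32-bit word inside that page. A minimal sketch of that index arithmetic, assuming PAGE_CACHE_BITS is the number of bitmap bits per page (PAGE_SIZE * 8); the helper names are made up:

#include <linux/mm.h>

#define SKETCH_PAGE_CACHE_BITS (PAGE_SIZE * 8)

/* Sketch only: locate bit 'offset' of an on-disk allocation bitmap. */
static inline pgoff_t bitmap_page_index(u32 offset)
{
	return offset / SKETCH_PAGE_CACHE_BITS;              /* which page */
}

static inline unsigned int bitmap_word_index(u32 offset)
{
	return (offset & (SKETCH_PAGE_CACHE_BITS - 1)) / 32; /* which 32-bit word */
}
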
/kernel/linux/linux-6.6/fs/ecryptfs/
read_write.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
19 * @offset: Byte offset in the lower file to which to write the data
20 * @size: Number of bytes from @data to write at @offset in the lower
28 loff_t offset, size_t size) in ecryptfs_write_lower() argument
33 lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; in ecryptfs_write_lower()
35 return -EIO; in ecryptfs_write_lower()
36 rc = kernel_write(lower_file, data, size, &offset); in ecryptfs_write_lower()
44 * @page_for_lower: The page containing the data to be written to the
46 * @offset_in_page: The offset in the @page_for_lower from which to
51 * Determines the byte offset in the file for the given page and
[all …]
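
Note: ecryptfs_write_lower() above is a thin wrapper around kernel_write() at a byte offset, and the page-based variant documented below it derives that offset from a page index plus an in-page offset. A minimal sketch of that derivation and call, with the eCryptfs-specific inode lookups elided and the function name made up:

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Sketch only: write 'size' bytes taken from 'page' at 'off_in_page'
 * into 'lower_file' at the matching byte offset in that file. */
static int write_page_segment(struct file *lower_file, struct page *page,
			      unsigned int off_in_page, size_t size)
{
	loff_t offset = ((loff_t)page->index << PAGE_SHIFT) + off_in_page;
	void *virt = kmap_local_page(page);
	ssize_t rc = kernel_write(lower_file, virt + off_in_page, size, &offset);

	kunmap_local(virt);
	return rc < 0 ? rc : 0;
}
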
/kernel/linux/linux-5.10/fs/ecryptfs/
read_write.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
19 * @offset: Byte offset in the lower file to which to write the data
20 * @size: Number of bytes from @data to write at @offset in the lower
28 loff_t offset, size_t size) in ecryptfs_write_lower() argument
33 lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; in ecryptfs_write_lower()
35 return -EIO; in ecryptfs_write_lower()
36 rc = kernel_write(lower_file, data, size, &offset); in ecryptfs_write_lower()
44 * @page_for_lower: The page containing the data to be written to the
46 * @offset_in_page: The offset in the @page_for_lower from which to
51 * Determines the byte offset in the file for the given page and
[all …]
/kernel/linux/linux-6.6/fs/hfsplus/
bitmap.c
1 // SPDX-License-Identifier: GPL-2.0
20 u32 offset, u32 *max) in hfsplus_block_allocate() argument
23 struct page *page; in hfsplus_block_allocate() local
34 hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); in hfsplus_block_allocate()
35 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate()
36 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate()
37 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate()
38 if (IS_ERR(page)) { in hfsplus_block_allocate()
42 pptr = kmap_local_page(page); in hfsplus_block_allocate()
43 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; in hfsplus_block_allocate()
[all …]
/kernel/linux/linux-5.10/drivers/nvmem/
rave-sp-eeprom.c
1 // SPDX-License-Identifier: GPL-2.0+
10 #include <linux/mfd/rave-sp.h>
12 #include <linux/nvmem-provider.h>
18 * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
29 * enum rave_sp_eeprom_header_size - EEPROM command header sizes
43 * struct rave_sp_eeprom_page - RAVE SP EEPROM page
59 * struct rave_sp_eeprom - RAVE SP EEPROM device
76 * rave_sp_eeprom_io - Low-level part of EEPROM page access
80 * @idx: number of the EEPROM page
81 * @page: Data to write or buffer to store result (via page->data)
[all …]
/kernel/linux/linux-6.6/drivers/nvmem/
rave-sp-eeprom.c
1 // SPDX-License-Identifier: GPL-2.0+
10 #include <linux/mfd/rave-sp.h>
12 #include <linux/nvmem-provider.h>
18 * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
29 * enum rave_sp_eeprom_header_size - EEPROM command header sizes
43 * struct rave_sp_eeprom_page - RAVE SP EEPROM page
59 * struct rave_sp_eeprom - RAVE SP EEPROM device
76 * rave_sp_eeprom_io - Low-level part of EEPROM page access
80 * @idx: number of the EEPROM page
81 * @page: Data to write or buffer to store result (via page->data)
[all …]
/kernel/linux/linux-5.10/mm/
dmapool.c
1 // SPDX-License-Identifier: GPL-2.0-only
9 * This allocator returns small blocks of a given size which are DMA-able by
10 * the given device. It uses the dma_alloc_coherent page allocator to get
15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
16 * allocated pages. Each page in the page_list is split into blocks of at
17 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
18 * list of free blocks within the page. Used blocks aren't tracked, but we
19 * keep a count of how many are currently allocated from each page.
23 #include <linux/dma-mapping.h>
58 unsigned int offset; member
[all …]
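
Note: the dmapool.c comment above describes per-device pools of small, fixed-size DMA-able blocks carved out of dma_alloc_coherent() pages. The public API is small; a minimal usage sketch, assuming a valid struct device *dev (pool name and sizes are arbitrary):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Sketch only: create a pool of 64-byte, 64-byte-aligned blocks,
 * allocate one block, then release everything. */
static int dmapool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = dma_pool_create("demo-pool", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma); /* CPU addr + bus addr */
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);

	dma_pool_destroy(pool);
	return vaddr ? 0 : -ENOMEM;
}
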
swapfile.c
1 // SPDX-License-Identifier: GPL-2.0-only
30 #include <linux/backing-dev.h>
64 static int least_priority = -1;
68 static const char Bad_offset[] = "Bad swap offset entry ";
69 static const char Unused_offset[] = "Unused swap offset entry ";
84 * swap_info_struct changes between not-full/full, it needs to
85 * add/remove itself to/from this list, but the swap_info_struct->lock
87 * before any swap_info_struct->lock.
120 * corresponding page
128 unsigned long offset, unsigned long flags) in __try_to_reclaim_swap() argument
[all …]
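
Note: swapfile.c above works with (type, offset) pairs, where the offset is a page-sized slot index inside one swap area. The encode/decode helpers live in <linux/swapops.h>; a minimal round-trip sketch (function name made up):

#include <linux/bug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/* Sketch only: pack a (type, offset) pair into a swp_entry_t and unpack it. */
static void swap_entry_roundtrip(unsigned int type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);

	WARN_ON(swp_type(entry) != type);
	WARN_ON(swp_offset(entry) != offset);
}
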
/kernel/linux/linux-5.10/sound/pci/trident/
trident_memory.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * Trident 4DWave-NX memory page allocation (TLB area)
19 /* page arguments of these two macros are Trident page (4096 bytes), not like
22 #define __set_tlb_bus(trident,page,ptr,addr) \ argument
23 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
24 (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
25 #define __tlb_to_ptr(trident,page) \ argument
26 (void*)((trident)->tlb.shadow_entries[page])
27 #define __tlb_to_addr(trident,page) \ argument
28 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/
i915_gem_object.h
2 * SPDX-License-Identifier: MIT
23 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
29 if (overflows_type(size, obj->base.size)) in i915_gem_object_size_2big()
88 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
104 return idr_find(&file->object_idr, handle); in i915_gem_object_lookup_rcu()
110 if (obj && !kref_get_unless_zero(&obj->base.refcount)) in i915_gem_object_get_rcu()
137 drm_gem_object_get(&obj->base); in i915_gem_object_get()
145 __drm_gem_object_put(&obj->base); in i915_gem_object_put()
148 #define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
160 kref_read(&obj->base.refcount) > 0) in assert_object_held_shared()
[all …]
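
Note: i915_gem_object_lookup_rcu() above resolves a handle with idr_find() under RCU, and the object is only kept if kref_get_unless_zero() still succeeds. A minimal sketch of that lookup-then-pin pattern on a made-up idr of refcounted objects:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct my_obj {
	struct kref ref;
	/* payload ... */
};

/* Sketch only: return a referenced object for 'handle', or NULL if it is
 * absent or already on its way to being freed. */
static struct my_obj *my_obj_lookup(struct idr *idr, u32 handle)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(idr, handle);
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL; /* found, but the last reference was already dropped */
	rcu_read_unlock();

	return obj;
}
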
/kernel/linux/linux-6.6/drivers/video/fbdev/core/
fb_defio.c
26 static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) in fb_deferred_io_page()
28 void *screen_base = (void __force *) info->screen_base; in fb_deferred_io_page()
29 struct page *page; in fb_deferred_io_page() local
32 page = vmalloc_to_page(screen_base + offs); in fb_deferred_io_page()
34 page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); in fb_deferred_io_page()
36 return page; in fb_deferred_io_page()
40 unsigned long offset, in fb_deferred_io_pageref_get() argument
41 struct page *page) in fb_deferred_io_pageref_get() argument
43 struct fb_deferred_io *fbdefio = info->fbdefio; in fb_deferred_io_pageref_get()
44 struct list_head *pos = &fbdefio->pagereflist; in fb_deferred_io_pageref_get()
[all …]
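
Note: fb_deferred_io_page() above converts a byte offset inside the framebuffer into a struct page, via vmalloc_to_page() for vmalloc-backed memory or pfn_to_page() on the physical start address otherwise. A minimal sketch of the second path (function and parameter names made up):

#include <linux/mm.h>

/* Sketch only: the struct page backing byte 'offs' of a physically
 * contiguous region starting at physical address 'phys_start'. */
static struct page *region_offset_to_page(unsigned long phys_start,
					  unsigned long offs)
{
	return pfn_to_page((phys_start + offs) >> PAGE_SHIFT);
}
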
/kernel/linux/linux-5.10/include/linux/
scatterlist.h
1 /* SPDX-License-Identifier: GPL-2.0 */
13 unsigned int offset; member
34 #define sg_dma_address(sg) ((sg)->dma_address)
37 #define sg_dma_len(sg) ((sg)->dma_length)
39 #define sg_dma_len(sg) ((sg)->length)
52 * the page pointer AND encode information about the sg table as well. The two
68 * We overload the LSB of the page pointer to indicate whether it's
70 * Those low bits are there for everyone! (thanks mason :-)
72 #define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
73 #define sg_is_last(sg) ((sg)->page_link & SG_END)
[all …]
/kernel/linux/linux-6.6/arch/arm/mm/
dma-mapping.c
1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
28 #include <asm/page.h>
33 #include <asm/dma-iommu.h>
36 #include <asm/xen/xen-ops.h>
55 struct page *page; member
64 struct page **ret_page);
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_vm.c
71 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
78 if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) in drm_io_prot()
83 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
84 vma->vm_start)) in drm_io_prot()
96 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
109 * \return pointer to the page structure.
111 * Find the right map and if it's AGP memory find the real physical page to
112 * map, get the page, increment the use count and return it.
117 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault()
118 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_fault()
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/
drm_vm.c
69 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
73 if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) in drm_io_prot()
78 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
79 vma->vm_start)) in drm_io_prot()
91 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
104 * \return pointer to the page structure.
106 * Find the right map and if it's AGP memory find the real physical page to
107 * map, get the page, increment the use count and return it.
112 struct vm_area_struct *vma = vmf->vma; in drm_vm_fault()
113 struct drm_file *priv = vma->vm_file->private_data; in drm_vm_fault()
[all …]
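
Note: drm_io_prot() above derives the page protection for an I/O mapping from the VMA flags, roughly making register mappings non-cached and write-combining-capable mappings write-combined. A minimal sketch of the common write-combined case (function name made up):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch only: write-combined protection for an MMIO/framebuffer mapping,
 * starting from the VMA's default protection bits. */
static pgprot_t mmio_wc_prot(struct vm_area_struct *vma)
{
	return pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
}
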
