Searched +full:page +full:- +full:size (Results 1 – 25 of 1063) sorted by relevance

/kernel/linux/linux-4.19/arch/arm/mm/
dma-mapping.c
2: * linux/arch/arm/mm/dma-mapping.c
4: * Copyright (C) 2000-2004 Russell King
21: #include <linux/dma-mapping.h>
22: #include <linux/dma-contiguous.h>
37: #include <asm/dma-iommu.h>
40: #include <asm/dma-contiguous.h>
47: size_t size;
57: size_t size;
59: struct page *page;
68: struct page **ret_page);
[all …]
/kernel/linux/linux-5.10/arch/arm/mm/
dma-mapping.c
1: // SPDX-License-Identifier: GPL-2.0-only
3: * linux/arch/arm/mm/dma-mapping.c
5: * Copyright (C) 2000-2004 Russell King
17: #include <linux/dma-direct.h>
18: #include <linux/dma-map-ops.h>
33: #include <asm/dma-iommu.h>
36: #include <xen/swiotlb-xen.h>
43: size_t size;
53: size_t size;
55: struct page *page;
[all …]
/kernel/linux/linux-5.10/kernel/dma/
direct.c
1: // SPDX-License-Identifier: GPL-2.0
3: * Copyright (C) 2018-2020 Christoph Hellwig.
10: #include <linux/dma-map-ops.h>
21: * override the variable below for dma-direct to work properly.
33: static inline struct page *dma_direct_to_page(struct device *dev,
41: phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
44: return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
50: u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
68: static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
74: return dma_addr + size - 1 <=
[all …]
ops_helpers.c
1: // SPDX-License-Identifier: GPL-2.0
6: #include <linux/dma-map-ops.h>
8: static struct page *dma_common_vaddr_to_page(void *cpu_addr)
16: * Create scatter-list for the already allocated DMA buffer.
19: void *cpu_addr, dma_addr_t dma_addr, size_t size,
22: struct page *page = dma_common_vaddr_to_page(cpu_addr);
27: sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
32: * Create userspace mapping for the DMA-coherent memory.
35: void *cpu_addr, dma_addr_t dma_addr, size_t size,
40: unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
[all …]
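
The two helpers previewed above back dma_get_sgtable() and dma_mmap_*() for coherent allocations that have a kernel virtual address. As a minimal, hypothetical sketch of how a driver might hand such a buffer to userspace (struct my_dma_buf and my_buf_mmap are illustrative names, not from these results; dma_mmap_coherent() routes through helpers like dma_common_mmap() on many configurations):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct my_dma_buf {
	struct device *dev;
	void *cpu_addr;      /* from dma_alloc_coherent() */
	dma_addr_t dma_addr; /* device-visible address */
	size_t size;
};

/* called from the driver's ->mmap file operation */
static int my_buf_mmap(struct my_dma_buf *buf, struct vm_area_struct *vma)
{
	/* refuse mappings larger than the allocation */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(buf->size))
		return -EINVAL;

	return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}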
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
32: kvfree(pgt->pde);
41: const struct nvkm_vmm_page *page)
43: const u32 pten = 1 << desc->bits;
47: if (desc->type > PGT) {
48: if (desc->type == SPT) {
49: const struct nvkm_vmm_desc *pair = page[-1].desc;
50: lpte = pten >> (desc->bits - pair->bits);
58: pgt->page = page ? page->shift : 0;
59: pgt->sparse = sparse;
61: if (desc->type == PGD) {
[all …]
/kernel/linux/linux-5.10/arch/arm64/include/asm/
kvm_pgtable.h
1: // SPDX-License-Identifier: GPL-2.0-only
17: * struct kvm_pgtable - KVM page-table.
18: * @ia_bits: Maximum input address size, in bits.
19: * @start_level: Level at which the page-table walk starts.
20: * @pgd: Pointer to the first top-level entry of the page-table.
21: * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
28: /* Stage-2 only */
33: * enum kvm_pgtable_prot - Page-table permissions and attributes.
53: * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
73: * struct kvm_pgtable_walker - Hook into a page-table walk.
[all …]
/kernel/linux/linux-5.10/mm/
dmapool.c
1: // SPDX-License-Identifier: GPL-2.0-only
9: * This allocator returns small blocks of a given size which are DMA-able by
10: * the given device. It uses the dma_alloc_coherent page allocator to get
11: * new pages, then splits them up into blocks of the required size.
15: * represented by the 'struct dma_pool' which keeps a doubly-linked list of
16: * allocated pages. Each page in the page_list is split into blocks of at
17: * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
18: * list of free blocks within the page. Used blocks aren't tracked, but we
19: * keep a count of how many are currently allocated from each page.
23: #include <linux/dma-mapping.h>
[all …]
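
The comment block above summarizes the dma_pool allocator. A minimal usage sketch of its public API (dma_pool_create/alloc/free/destroy are the real kernel entry points; the device pointer, pool name, and 64-byte block size are placeholder choices):

#include <linux/device.h>
#include <linux/dmapool.h>

static int my_setup_descriptors(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	/* 64-byte blocks, 64-byte aligned, carved from coherent pages */
	pool = dma_pool_create("my-desc", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* returns both a CPU pointer and a bus address for the block */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program 'dma' into the device, fill 'desc' from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}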
readahead.c
1: // SPDX-License-Identifier: GPL-2.0-only
3: * mm/readahead.c - address_space-level file readahead.
16: #include <linux/backing-dev.h>
23: #include <linux/blk-cgroup.h>
36: ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
37: ra->prev_pos = -1;
42: * see if a page needs releasing upon read_cache_pages() failure
43: * - the caller of read_cache_pages() may have set PG_private or PG_fscache
49: struct page *page)
51: if (page_has_private(page)) {
[all …]
zsmalloc.c
10: * Released under the terms of 3-clause BSD License
16: * struct page(s) to form a zspage.
18: * Usage of struct page fields:
19: * page->private: points to zspage
20: * page->freelist(index): links together all component pages of a zspage
21: * For the huge page, this is always 0, so we use this field
23: * page->units: first object offset in a subpage of zspage
25: * Usage of struct page flags:
26: * PG_private: identifies the first component page
27: * PG_owner_priv_1: identifies the huge component page
[all …]
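
zsmalloc's consumers (zram, zswap) go through an opaque-handle API rather than touching the struct page fields described above. A hedged sketch of that API, assuming the linux-5.10 prototypes in include/linux/zsmalloc.h (my_zs_demo and the pool name are illustrative):

#include <linux/zsmalloc.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int my_zs_demo(void)
{
	struct zs_pool *pool = zs_create_pool("my-pool");
	unsigned long handle;
	void *obj;

	if (!pool)
		return -ENOMEM;

	/* odd sizes are fine; that is the point of the allocator */
	handle = zs_malloc(pool, 300, GFP_KERNEL);
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* objects must be mapped before the CPU can touch them */
	obj = zs_map_object(pool, handle, ZS_MM_WO);
	memset(obj, 0xaa, 300);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}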
/kernel/linux/linux-4.19/mm/
dmapool.c
12: * This allocator returns small blocks of a given size which are DMA-able by
13: * the given device. It uses the dma_alloc_coherent page allocator to get
14: * new pages, then splits them up into blocks of the required size.
18: * represented by the 'struct dma_pool' which keeps a doubly-linked list of
19: * allocated pages. Each page in the page_list is split into blocks of at
20: * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
21: * list of free blocks within the page. Used blocks aren't tracked, but we
22: * keep a count of how many are currently allocated from each page.
26: #include <linux/dma-mapping.h>
48: size_t size;
[all …]
readahead.c
2: * mm/readahead.c - address_space-level file readahead.
15: #include <linux/backing-dev.h>
22: #include <linux/blk-cgroup.h>
34: ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
35: ra->prev_pos = -1;
40: * see if a page needs releasing upon read_cache_pages() failure
41: * - the caller of read_cache_pages() may have set PG_private or PG_fscache
47: struct page *page)
49: if (page_has_private(page)) {
50: if (!trylock_page(page))
[all …]
zsmalloc.c
10: * Released under the terms of 3-clause BSD License
16: * struct page(s) to form a zspage.
18: * Usage of struct page fields:
19: * page->private: points to zspage
20: * page->freelist(index): links together all component pages of a zspage
21: * For the huge page, this is always 0, so we use this field
23: * page->units: first object offset in a subpage of zspage
25: * Usage of struct page flags:
26: * PG_private: identifies the first component page
27: * PG_owner_priv_1: identifies the huge component page
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
32: kvfree(pgt->pde);
41: const struct nvkm_vmm_page *page)
43: const u32 pten = 1 << desc->bits;
47: if (desc->type > PGT) {
48: if (desc->type == SPT) {
49: const struct nvkm_vmm_desc *pair = page[-1].desc;
50: lpte = pten >> (desc->bits - pair->bits);
58: pgt->page = page ? page->shift : 0;
59: pgt->sparse = sparse;
61: if (desc->type == PGD) {
[all …]
/kernel/linux/linux-4.19/arch/xtensa/kernel/
pci-dma.c
9: * Copyright (C) 2002 - 2005 Tensilica Inc.
18: #include <linux/dma-contiguous.h>
19: #include <linux/dma-noncoherent.h>
20: #include <linux/dma-direct.h>
29: static void do_cache_op(phys_addr_t paddr, size_t size,
32: unsigned long off = paddr & (PAGE_SIZE - 1);
34: struct page *page = pfn_to_page(pfn);
36: if (!PageHighMem(page))
37: fn((unsigned long)phys_to_virt(paddr), size);
39: while (size > 0) {
[all …]
/kernel/linux/linux-4.19/arch/arm64/mm/
dma-mapping.c
2: * SWIOTLB-based DMA API implementation
27: #include <linux/dma-direct.h>
28: #include <linux/dma-contiguous.h>
57: static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
67: val = gen_pool_alloc(atomic_pool, size);
73: memset(ptr, 0, size);
79: static bool __in_atomic_pool(void *start, size_t size)
81: return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
84: static int __free_from_pool(void *start, size_t size)
86: if (!__in_atomic_pool(start, size))
[all …]
/kernel/linux/linux-5.10/sound/pci/emu10k1/
memory.c
1: // SPDX-License-Identifier: GPL-2.0-or-later
6: * EMU10K1 memory page allocation (PTB area)
18: /* page arguments of these two macros are Emu page (4096 bytes), not like
21: #define __set_ptb_entry(emu,page,addr) \
22: (((__le32 *)(emu)->ptb_pages.area)[page] = \
23: cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
24: #define __get_ptb_entry(emu, page) \
25: (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
30: /* get aligned page from offset address */
32: /* get offset address from aligned page */
[all …]
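
The __set_ptb_entry() macro above packs a PTB entry as (addr << address_mode) | page and stores it little-endian. A standalone C illustration of just that packing, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* mirror of the packing done by __set_ptb_entry() above */
static uint32_t pack_ptb_entry(uint32_t addr, uint32_t page,
			       unsigned int address_mode)
{
	return (addr << address_mode) | page;
}

int main(void)
{
	/* made-up values: Emu page 5 of a buffer at bus address 0x1234000 */
	printf("entry = 0x%08x\n", pack_ptb_entry(0x1234000, 5, 0));
	return 0;
}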
/kernel/linux/linux-4.19/sound/pci/emu10k1/
memory.c
5: * EMU10K1 memory page allocation (PTB area)
20: * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33: /* page arguments of these two macros are Emu page (4096 bytes), not like
36: #define __set_ptb_entry(emu,page,addr) \
37: (((__le32 *)(emu)->ptb_pages.area)[page] = \
38: cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
39: #define __get_ptb_entry(emu, page) \
40: (le32_to_cpu(((__le32 *)(emu)->ptb_pages.area)[page]))
45: /* get aligned page from offset address */
47: /* get offset address from aligned page */
[all …]
/kernel/linux/linux-4.19/arch/nds32/kernel/
dma.c
1: // SPDX-License-Identifier: GPL-2.0
2: // Copyright (C) 2005-2017 Andes Technology Corporation
7: #include <linux/dma-noncoherent.h>
14: #include <asm/proc-fns.h>
17: * This is the page table (2MB) covering uncached, DMA consistent allocations
33: * struct page **pages;
55: struct page *vm_pages;
65: size_t size, int gfp)
67: unsigned long addr = head->vm_start, end = head->vm_end - size;
77: list_for_each_entry(c, &head->vm_list, vm_list) {
[all …]
/kernel/linux/linux-5.10/include/linux/
dma-map-ops.h
1: /* SPDX-License-Identifier: GPL-2.0 */
9: #include <linux/dma-mapping.h>
15: void *(*alloc)(struct device *dev, size_t size,
18: void (*free)(struct device *dev, size_t size, void *vaddr,
20: struct page *(*alloc_pages)(struct device *dev, size_t size,
23: void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
25: void *(*alloc_noncoherent)(struct device *dev, size_t size,
28: void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
34: void *cpu_addr, dma_addr_t dma_addr, size_t size,
37: dma_addr_t (*map_page)(struct device *dev, struct page *page,
[all …]
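
The struct dma_map_ops previewed above is the hook table behind the DMA API. A minimal, hypothetical sketch of wiring one up (my_alloc, my_free, my_dma_ops, and my_setup_dma are invented names; the .alloc/.free prototypes and set_dma_ops() match linux-5.10, and the page_to_phys() shortcut assumes the device can reach all of RAM):

#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *my_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t gfp, unsigned long attrs)
{
	struct page *page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;
	*dma_handle = page_to_phys(page); /* assumes dev reaches all RAM */
	return page_address(page);
}

static void my_free(struct device *dev, size_t size, void *vaddr,
		    dma_addr_t dma_handle, unsigned long attrs)
{
	__free_pages(virt_to_page(vaddr), get_order(size));
}

static const struct dma_map_ops my_dma_ops = {
	.alloc = my_alloc,
	.free  = my_free,
	/* .map_page, .mmap, .get_sgtable, ... as the bus requires */
};

/* typically done while binding the device to its bus or IOMMU */
void my_setup_dma(struct device *dev)
{
	set_dma_ops(dev, &my_dma_ops);
}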
/kernel/linux/linux-4.19/drivers/staging/gasket/
gasket_page_table.h
1: /* SPDX-License-Identifier: GPL-2.0 */
3: * Gasket Page Table functionality. This file describes the address
5: * As much as possible, internal details are hidden to simplify use -
6: * all calls are thread-safe (protected by an internal mutex) except where
29: * @ppage_table: Pointer to Gasket page table pointer. Set by this call.
42: * Description: Allocates and initializes data to track address translation -
43: * simple and extended page table metadata. Initially, the page table is
44: * partitioned such that all addresses are "simple" (single-level lookup).
55: * Deallocate and cleanup page table data.
56: * @page_table: Gasket page table pointer.
[all …]
/kernel/linux/linux-5.10/drivers/staging/gasket/
gasket_page_table.h
1: /* SPDX-License-Identifier: GPL-2.0 */
3: * Gasket Page Table functionality. This file describes the address
5: * As much as possible, internal details are hidden to simplify use -
6: * all calls are thread-safe (protected by an internal mutex) except where
29: * @ppage_table: Pointer to Gasket page table pointer. Set by this call.
42: * Description: Allocates and initializes data to track address translation -
43: * simple and extended page table metadata. Initially, the page table is
44: * partitioned such that all addresses are "simple" (single-level lookup).
55: * Deallocate and cleanup page table data.
56: * @page_table: Gasket page table pointer.
[all …]
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
hugetlbpage.rst
11: the Linux kernel. This support is built on top of multiple page size support
13: support 4K and 2M (1G if architecturally supported) page sizes, ia64
14: architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
15: 256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
21: Users can use the huge page support in Linux kernel by either using the mmap
30: persistent hugetlb pages in the kernel's huge page pool. It also displays
31: default huge page size and information about the number of free, reserved
32: and surplus huge pages in the pool of huge pages of default size.
33: The huge page size is needed for generating the proper alignment and
34: size of the arguments to system calls that map huge page regions.
[all …]
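
The hugetlbpage.rst excerpt above mentions the mmap route. A userspace sketch that requests one default-size huge page (assumes huge pages have been reserved in the pool, e.g. via /proc/sys/vm/nr_hugepages, and that the default size is 2M as on x86_64):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LENGTH (2UL * 1024 * 1024) /* one 2M huge page */

int main(void)
{
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)"); /* pool empty or unsupported */
		return 1;
	}
	memset(addr, 0, LENGTH); /* touch it so the page really faults in */
	munmap(addr, LENGTH);
	return 0;
}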
/kernel/linux/linux-4.19/include/xen/arm/
page-coherent.h
1: /* SPDX-License-Identifier: GPL-2.0 */
5: #include <asm/page.h>
6: #include <asm/dma-mapping.h>
7: #include <linux/dma-mapping.h>
11: if (dev && dev->archdata.dev_dma_ops)
12: return dev->archdata.dev_dma_ops;
16: void __xen_dma_map_page(struct device *hwdev, struct page *page,
17: dma_addr_t dev_addr, unsigned long offset, size_t size,
20: size_t size, enum dma_data_direction dir,
23: dma_addr_t handle, size_t size, enum dma_data_direction dir);
[all …]
/kernel/linux/linux-4.19/arch/powerpc/mm/
dma-noncoherent.c
11: * -- Dan
13: * Reorganized to get rid of the arch-specific consistent_* functions
14: * and provide non-coherent implementations for the DMA API. -Matt
18: * modified. -Matt
32: #include <linux/dma-mapping.h>
44: * the "Advanced Setup" menu. -Matt
48: #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
51: * This is the page table (2MB) covering uncached, DMA consistent allocations
66: * struct page **pages;
97: ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
[all …]
/kernel/linux/linux-4.19/Documentation/admin-guide/mm/
hugetlbpage.rst
11: the Linux kernel. This support is built on top of multiple page size support
13: support 4K and 2M (1G if architecturally supported) page sizes, ia64
14: architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
15: 256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
21: Users can use the huge page support in Linux kernel by either using the mmap
30: persistent hugetlb pages in the kernel's huge page pool. It also displays
31: default huge page size and information about the number of free, reserved
32: and surplus huge pages in the pool of huge pages of default size.
33: The huge page size is needed for generating the proper alignment and
34: size of the arguments to system calls that map huge page regions.
[all …]
