Home
last modified time | relevance | path

Searched +full:page +full:- +full:level (Results 1 – 25 of 1058) sorted by relevance

12345678910>>...43

/kernel/linux/linux-5.10/fs/verity/
Dverify.c1 // SPDX-License-Identifier: GPL-2.0
3 * fs/verity/verify.c: data verification functions, i.e. hooks for ->readpages()
17 * hash_at_level() - compute the location of the block's hash at the given level
21 * @level: (in) the level of hash we want (0 is leaf level)
26 pgoff_t dindex, unsigned int level, pgoff_t *hindex, in hash_at_level() argument
31 /* Offset of the hash within the level's region, in hashes */ in hash_at_level()
32 position = dindex >> (level * params->log_arity); in hash_at_level()
35 *hindex = params->level_start[level] + (position >> params->log_arity); in hash_at_level()
38 *hoffset = (position & ((1 << params->log_arity) - 1)) << in hash_at_level()
39 (params->log_blocksize - params->log_arity); in hash_at_level()
[all …]
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
Dtdp_iter.c1 // SPDX-License-Identifier: GPL-2.0
8 * Recalculates the pointer to the SPTE for the current GFN and level and
13 iter->sptep = iter->pt_path[iter->level - 1] + in tdp_iter_refresh_sptep()
14 SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
15 iter->old_spte = READ_ONCE(*iter->sptep); in tdp_iter_refresh_sptep()
18 static gfn_t round_gfn_for_level(gfn_t gfn, int level) in round_gfn_for_level() argument
20 return gfn & -KVM_PAGES_PER_HPAGE(level); in round_gfn_for_level()
24 * Sets a TDP iterator to walk a pre-order traversal of the paging structure
33 iter->next_last_level_gfn = next_last_level_gfn; in tdp_iter_start()
34 iter->yielded_gfn = iter->next_last_level_gfn; in tdp_iter_start()
[all …]
Dpaging_tmpl.h1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
19 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
30 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
48 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) argument
62 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
66 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
82 * The guest_walker structure emulates the behavior of the hardware page
86 int level; member
[all …]
/kernel/linux/linux-6.6/Documentation/mm/
Dpage_tables.rst1 .. SPDX-License-Identifier: GPL-2.0
4 Page Tables
10 feature of all Unix-like systems as time went by. In 1985 the feature was
13 Page tables map virtual addresses as seen by the CPU into physical addresses
16 Linux defines page tables as a hierarchy which is currently five levels in
21 by the underlying physical page frame. The **page frame number** or **pfn**
22 is the physical address of the page (as seen on the external memory bus)
26 the last page of physical memory the external address bus of the CPU can
29 With a page granularity of 4KB and an address range of 32 bits, pfn 0 is at
34 As you can see, with 4KB pages the page base address uses bits 12-31 of the
[all …]
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
Dtdp_iter.c1 // SPDX-License-Identifier: GPL-2.0
9 * Recalculates the pointer to the SPTE for the current GFN and level and
14 iter->sptep = iter->pt_path[iter->level - 1] + in tdp_iter_refresh_sptep()
15 SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
16 iter->old_spte = kvm_tdp_mmu_read_spte(iter->sptep); in tdp_iter_refresh_sptep()
25 iter->yielded = false; in tdp_iter_restart()
26 iter->yielded_gfn = iter->next_last_level_gfn; in tdp_iter_restart()
27 iter->level = iter->root_level; in tdp_iter_restart()
29 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_restart()
32 iter->valid = true; in tdp_iter_restart()
[all …]
Dpaging_tmpl.h1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
19 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
21 * once per guest PTE type. The per-type defines are #undef'd at the end.
50 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
58 #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
64 /* Common logic, but per-type values. These also need to be undefined. */
65 #define PT_BASE_ADDR_MASK ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
77 * The guest_walker structure emulates the behavior of the hardware page
[all …]
Dmmu_internal.h1 /* SPDX-License-Identifier: GPL-2.0 */
15 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
16 #define __PT_LEVEL_SHIFT(level, bits_per_level) \ argument
17 (PAGE_SHIFT + ((level) - 1) * (bits_per_level))
18 #define __PT_INDEX(address, level, bits_per_level) \ argument
19 (((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))
21 #define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \ argument
22 ((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
24 #define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \ argument
25 ((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
[all …]
/kernel/linux/linux-5.10/tools/perf/pmu-events/arch/arm64/ampere/emag/
Dcache.json42 "PublicDescription": "Level 1 instruction cache refill",
48 "PublicDescription": "Level 1 instruction TLB refill",
54 "PublicDescription": "Level 1 data cache refill",
60 "PublicDescription": "Level 1 data cache access",
66 "PublicDescription": "Level 1 data TLB refill",
72 "PublicDescription": "Level 1 instruction cache access",
78 "PublicDescription": "Level 2 data cache access",
84 "PublicDescription": "Level 2 data refill",
90 "PublicDescription": "Level 2 data cache, Write-Back",
93 "BriefDescription": "L2D cache Write-Back"
[all …]
/kernel/linux/linux-6.6/arch/arm64/include/asm/
Dkvm_pgtable.h1 // SPDX-License-Identifier: GPL-2.0-only
17 * The largest supported block sizes for KVM (no 52-bit PA support):
18 * - 4K (level 1): 1GB
19 * - 16K (level 2): 32MB
20 * - 64K (level 2): 512MB
45 #define KVM_PHYS_INVALID (-1ULL)
79 static inline u64 kvm_granule_shift(u32 level) in kvm_granule_shift() argument
82 return ARM64_HW_PGTABLE_LEVEL_SHIFT(level); in kvm_granule_shift()
85 static inline u64 kvm_granule_size(u32 level) in kvm_granule_size() argument
87 return BIT(kvm_granule_shift(level)); in kvm_granule_size()
[all …]
/kernel/linux/linux-6.6/scripts/gdb/linux/
Dpgtable.py1 # SPDX-License-Identifier: GPL-2.0-only
5 # routines to introspect page table
18 def page_mask(level=1): argument
20 if level == 1:
23 elif level == 2:
26 elif level == 3:
29 raise Exception(f'Unknown page level: {level}')
44 return (bit_start, bit_end), data >> bit_start & ((1 << (1 + bit_end - bit_start)) - 1)
46 def entry_va(level, phys_addr, translating_va): argument
47 def start_bit(level): argument
[all …]
/kernel/linux/linux-6.6/tools/perf/pmu-events/arch/arm64/ampere/emag/
Dcache.json78 …"PublicDescription": "Level 2 access to data TLB that caused a page table walk. This event counts …
84 …"PublicDescription": "Level 2 access to instruction TLB that caused a page table walk. This event …
108 "PublicDescription": "Level 1 data cache late miss",
114 "PublicDescription": "Level 1 data cache prefetch request",
120 "PublicDescription": "Level 2 data cache prefetch request",
126 "PublicDescription": "Level 1 stage 2 TLB refill",
132 "PublicDescription": "Page walk cache level-0 stage-1 hit",
135 "BriefDescription": "Page walk, L0 stage-1 hit"
138 "PublicDescription": "Page walk cache level-1 stage-1 hit",
141 "BriefDescription": "Page walk, L1 stage-1 hit"
[all …]
/kernel/linux/linux-6.6/fs/verity/
Dverify.c1 // SPDX-License-Identifier: GPL-2.0
3 * Data verification functions, i.e. hooks for ->readahead()
19 static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage, in is_hash_block_verified()
27 * When the Merkle tree block size and page size are the same, then the in is_hash_block_verified()
28 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked in is_hash_block_verified()
29 * to directly indicate whether the page's block has been verified. in is_hash_block_verified()
31 * Using PG_checked also guarantees that we re-verify hash pages that in is_hash_block_verified()
32 * get evicted and re-instantiated from the backing storage, as new in is_hash_block_verified()
35 if (!vi->hash_block_verified) in is_hash_block_verified()
39 * When the Merkle tree block size and page size differ, we use a bitmap in is_hash_block_verified()
[all …]
/kernel/linux/linux-5.10/include/asm-generic/
Dpgalloc.h1 /* SPDX-License-Identifier: GPL-2.0 */
11 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
15 * anything beyond simple page allocation.
26 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
38 * pte_free_kernel - free PTE-level kernel page table page
40 * @pte: pointer to the memory containing the page table
48 * __pte_alloc_one - allocate a page for PTE-level user page table
52 * Allocates a page and runs the pgtable_pte_page_ctor().
55 * anything beyond simple page allocation or must have custom GFP flags.
57 * Return: `struct page` initialized as page table or %NULL on error
[all …]
/kernel/linux/linux-6.6/Documentation/virt/kvm/x86/
Dmmu.rst1 .. SPDX-License-Identifier: GPL-2.0
13 - correctness:
18 - security:
21 - performance:
23 - scaling:
25 - hardware:
27 - integration:
29 so that swapping, page migration, page merging, transparent
31 - dirty tracking:
33 and framebuffer-based displays
[all …]
/kernel/linux/linux-6.6/include/asm-generic/
Dpgalloc.h1 /* SPDX-License-Identifier: GPL-2.0 */
11 * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
15 * anything beyond simple page allocation.
31 * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
43 * pte_free_kernel - free PTE-level kernel page table memory
45 * @pte: pointer to the memory containing the page table
53 * __pte_alloc_one - allocate memory for a PTE-level user page table
57 * Allocate memory for a page table and ptdesc and runs pagetable_pte_ctor().
60 * anything beyond simple page allocation or must have custom GFP flags.
62 * Return: `struct page` referencing the ptdesc or %NULL on error
[all …]
/kernel/linux/linux-5.10/fs/f2fs/
Ddir.c1 // SPDX-License-Identifier: GPL-2.0
20 return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1)) in dir_blocks()
24 static unsigned int dir_buckets(unsigned int level, int dir_level) in dir_buckets() argument
26 if (level + dir_level < MAX_DIR_HASH_DEPTH / 2) in dir_buckets()
27 return 1 << (level + dir_level); in dir_buckets()
32 static unsigned int bucket_blocks(unsigned int level) in bucket_blocks() argument
34 if (level < MAX_DIR_HASH_DEPTH / 2) in bucket_blocks()
63 de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]; in set_de_type()
68 if (de->file_type < F2FS_FT_MAX) in f2fs_get_de_type()
69 return f2fs_filetype_table[de->file_type]; in f2fs_get_de_type()
[all …]
/kernel/linux/linux-5.10/drivers/staging/gasket/
Dgasket_page_table.c1 // SPDX-License-Identifier: GPL-2.0
3 * Implementation of Gasket page table support.
9 * Implementation of Gasket page table support.
13 * There is a configurable number of page table entries, as well as a
18 * page_table_config->total_entries = 8192
19 * page_table_config->extended_bit = 63
22 * Simple addresses - those whose containing pages are directly placed in the
23 * device's address translation registers - are laid out as:
24 * [ 63 - 25: 0 | 24 - 12: page index | 11 - 0: page offset ]
25 * page index: The index of the containing page in the device's address
[all …]
/kernel/linux/linux-6.6/drivers/iommu/amd/
Dio_pgtable_v2.c1 // SPDX-License-Identifier: GPL-2.0-only
3 * CPU-agnostic AMD IO page table v2 allocator.
10 #define pr_fmt(fmt) "AMD-Vi: " fmt
14 #include <linux/io-pgtable.h>
25 #define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
26 #define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
29 #define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
48 static inline u64 set_pgtable_attr(u64 *page) in set_pgtable_attr() argument
55 return (iommu_virt_to_phys(page) | prot); in set_pgtable_attr()
74 /* Large page */ in set_pte_attr()
[all …]
/kernel/linux/linux-6.6/arch/powerpc/include/asm/nohash/64/
Dpgtable-4k.h1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm-generic/pgtable-nop4d.h>
8 * Entries per page directory level. The PTE level must use a 64b record
9 * for each page table entry. The PMD and PGD level use a 32b record for
10 * each entry by assuming that each entry is page aligned.
29 /* PMD_SHIFT determines what a second-level page table entry can map */
32 #define PMD_MASK (~(PMD_SIZE-1))
34 /* PUD_SHIFT determines what a third-level page table entry can map */
37 #define PUD_MASK (~(PUD_SIZE-1))
39 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
[all …]
/kernel/linux/linux-5.10/arch/powerpc/include/asm/nohash/64/
Dpgtable-4k.h1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <asm-generic/pgtable-nop4d.h>
8 * Entries per page directory level. The PTE level must use a 64b record
9 * for each page table entry. The PMD and PGD level use a 32b record for
10 * each entry by assuming that each entry is page aligned.
29 /* PMD_SHIFT determines what a second-level page table entry can map */
32 #define PMD_MASK (~(PMD_SIZE-1))
34 /* PUD_SHIFT determines what a third-level page table entry can map */
37 #define PUD_MASK (~(PUD_SIZE-1))
39 /* PGDIR_SHIFT determines what a fourth-level page table entry can map */
[all …]
/kernel/linux/linux-6.6/tools/perf/pmu-events/arch/x86/elkhartlake/
Dvirtual-memory.json3 …"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Director…
10 …the number of first level TLB misses but second level hits due to a demand load that did not start…
17 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page
20page walks completed due to loads (including SW prefetches) whose address translations missed in a…
25 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G pag…
28page walks completed due to loads (including SW prefetches) whose address translations missed in a…
33 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or …
36page walks completed due to loads (including SW prefetches) whose address translations missed in a…
41 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K pag…
44page walks completed due to loads (including SW prefetches) whose address translations missed in a…
[all …]
/kernel/linux/linux-6.6/tools/perf/pmu-events/arch/x86/snowridgex/
Dvirtual-memory.json3 …"BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Director…
10 …the number of first level TLB misses but second level hits due to a demand load that did not start…
17 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page
20page walks completed due to loads (including SW prefetches) whose address translations missed in a…
25 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G pag…
28page walks completed due to loads (including SW prefetches) whose address translations missed in a…
33 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or …
36page walks completed due to loads (including SW prefetches) whose address translations missed in a…
41 …"BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K pag…
44page walks completed due to loads (including SW prefetches) whose address translations missed in a…
[all …]
/kernel/linux/linux-5.10/Documentation/virt/kvm/
Dmmu.rst1 .. SPDX-License-Identifier: GPL-2.0
13 - correctness:
18 - security:
21 - performance:
23 - scaling:
25 - hardware:
27 - integration:
29 so that swapping, page migration, page merging, transparent
31 - dirty tracking:
33 and framebuffer-based displays
[all …]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
Damdgpu_vm_pt.c1 // SPDX-License-Identifier: GPL-2.0 OR MIT
31 * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
37 unsigned int level; member
41 * amdgpu_vm_pt_level_shift - return the addr shift for each level
44 * @level: VMPT level
47 * The number of bits the pfn needs to be right shifted for a level.
50 unsigned int level) in amdgpu_vm_pt_level_shift() argument
52 switch (level) { in amdgpu_vm_pt_level_shift()
56 return 9 * (AMDGPU_VM_PDB0 - level) + in amdgpu_vm_pt_level_shift()
57 adev->vm_manager.block_size; in amdgpu_vm_pt_level_shift()
[all …]
/kernel/linux/linux-6.6/fs/f2fs/
Ddir.c1 // SPDX-License-Identifier: GPL-2.0
25 return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1)) in dir_blocks()
29 static unsigned int dir_buckets(unsigned int level, int dir_level) in dir_buckets() argument
31 if (level + dir_level < MAX_DIR_HASH_DEPTH / 2) in dir_buckets()
32 return BIT(level + dir_level); in dir_buckets()
37 static unsigned int bucket_blocks(unsigned int level) in bucket_blocks() argument
39 if (level < MAX_DIR_HASH_DEPTH / 2) in bucket_blocks()
45 /* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
50 struct super_block *sb = dir->i_sb; in f2fs_init_casefolded_name()
53 !is_dot_dotdot(fname->usr_fname->name, fname->usr_fname->len)) { in f2fs_init_casefolded_name()
[all …]

12345678910>>...43