/arch/x86/kvm/mmu/

tdp_iter.c
    11  static void tdp_iter_refresh_sptep(struct tdp_iter *iter)  in tdp_iter_refresh_sptep() argument
    13  iter->sptep = iter->pt_path[iter->level - 1] +  in tdp_iter_refresh_sptep()
    14  SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);  in tdp_iter_refresh_sptep()
    15  iter->old_spte = READ_ONCE(*rcu_dereference(iter->sptep));  in tdp_iter_refresh_sptep()
    27  void tdp_iter_restart(struct tdp_iter *iter)  in tdp_iter_restart() argument
    29  iter->yielded = false;  in tdp_iter_restart()
    30  iter->yielded_gfn = iter->next_last_level_gfn;  in tdp_iter_restart()
    31  iter->level = iter->root_level;  in tdp_iter_restart()
    33  iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);  in tdp_iter_restart()
    34  tdp_iter_refresh_sptep(iter);  in tdp_iter_restart()
    [all …]

tdp_mmu.c
    518  struct tdp_iter *iter,  in tdp_mmu_set_spte_atomic_no_dirty_log() argument
    521  WARN_ON_ONCE(iter->yielded);  in tdp_mmu_set_spte_atomic_no_dirty_log()
    529  if (is_removed_spte(iter->old_spte))  in tdp_mmu_set_spte_atomic_no_dirty_log()
    536  if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,  in tdp_mmu_set_spte_atomic_no_dirty_log()
    537  new_spte) != iter->old_spte)  in tdp_mmu_set_spte_atomic_no_dirty_log()
    540  __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,  in tdp_mmu_set_spte_atomic_no_dirty_log()
    541  new_spte, iter->level, true);  in tdp_mmu_set_spte_atomic_no_dirty_log()
    542  handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);  in tdp_mmu_set_spte_atomic_no_dirty_log()
    559  struct tdp_iter *iter,  in tdp_mmu_map_set_spte_atomic() argument
    564  if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))  in tdp_mmu_map_set_spte_atomic()
    [all …]

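The tdp_mmu.c hits above all revolve around the TDP MMU's lockless update idiom: the iterator caches the last value it read in iter->old_spte, and a writer only installs a new value if the entry still holds that cached value. A minimal sketch of that compare-and-retry step, using only fields visible in the snippet (iter->sptep, iter->old_spte); the real function additionally runs the changed-SPTE handlers afterwards, which are omitted here:

    /* Sketch only: returns false so the caller can re-read the entry and retry. */
    static bool try_set_spte(struct tdp_iter *iter, u64 new_spte)
    {
            if (is_removed_spte(iter->old_spte))
                    return false;           /* entry is being torn down */

            if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
                          new_spte) != iter->old_spte)
                    return false;           /* lost the race with another writer */

            return true;
    }
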
tdp_iter.h
    60  #define for_each_tdp_pte_min_level(iter, root, root_level, min_level, start, end) \  argument
    61  for (tdp_iter_start(&iter, root, root_level, min_level, start); \
    62  iter.valid && iter.gfn < end; \
    63  tdp_iter_next(&iter))
    65  #define for_each_tdp_pte(iter, root, root_level, start, end) \  argument
    66  for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)
    70  void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
    72  void tdp_iter_next(struct tdp_iter *iter);
    73  void tdp_iter_restart(struct tdp_iter *iter);

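tdp_iter.h shows the whole iteration contract in one place: for_each_tdp_pte_min_level() is a plain for loop that seeds the iterator with tdp_iter_start() and advances it with tdp_iter_next() until iter.valid clears or iter.gfn reaches end, and for_each_tdp_pte() fixes the floor at PG_LEVEL_4K. A hedged usage sketch; root_pt, root_level, start and end are placeholders the caller would supply, and the body only notes which iterator fields are meaningful:

    struct tdp_iter iter;

    /* Visit every PTE covering a GFN in [start, end), down to 4K granularity. */
    for_each_tdp_pte(iter, root_pt, root_level, start, end) {
            /*
             * iter.gfn, iter.level, iter.old_spte and iter.sptep describe the
             * current entry; tdp_iter_next() moves on to the next one.
             */
    }
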
mmu.c
    1116  struct rmap_iterator *iter)  in rmap_get_first() argument
    1124  iter->desc = NULL;  in rmap_get_first()
    1129  iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);  in rmap_get_first()
    1130  iter->pos = 0;  in rmap_get_first()
    1131  sptep = iter->desc->sptes[iter->pos];  in rmap_get_first()
    1142  static u64 *rmap_get_next(struct rmap_iterator *iter)  in rmap_get_next() argument
    1146  if (iter->desc) {  in rmap_get_next()
    1147  if (iter->pos < PTE_LIST_EXT - 1) {  in rmap_get_next()
    1148  ++iter->pos;  in rmap_get_next()
    1149  sptep = iter->desc->sptes[iter->pos];  in rmap_get_next()
    [all …]

mmu_audit.c
    194  struct rmap_iterator iter;  in audit_write_protection() local
    205  for_each_rmap_spte(rmap_head, &iter, sptep) {  in audit_write_protection()

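The two mmu.c helpers above are the engine behind the for_each_rmap_spte() walk seen in mmu_audit.c: rmap_get_first() primes the rmap_iterator from the rmap head, and rmap_get_next() steps through the pte_list_desc chain. A hedged caller-side sketch; do_something_with() is a stand-in for whatever per-SPTE work the walker performs:

    void do_something_with(u64 *sptep);     /* hypothetical per-SPTE work */

    struct rmap_iterator iter;
    u64 *sptep;

    /* Visit every SPTE currently recorded in this rmap chain. */
    for_each_rmap_spte(rmap_head, &iter, sptep)
            do_something_with(sptep);
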
/arch/arm64/kvm/vgic/

vgic-debug.c
    36  static void iter_next(struct vgic_state_iter *iter)  in iter_next() argument
    38  if (iter->dist_id == 0) {  in iter_next()
    39  iter->dist_id++;  in iter_next()
    43  iter->intid++;  in iter_next()
    44  if (iter->intid == VGIC_NR_PRIVATE_IRQS &&  in iter_next()
    45  ++iter->vcpu_id < iter->nr_cpus)  in iter_next()
    46  iter->intid = 0;  in iter_next()
    48  if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS)) {  in iter_next()
    49  if (iter->lpi_idx < iter->nr_lpis)  in iter_next()
    50  iter->intid = iter->lpi_array[iter->lpi_idx];  in iter_next()
    [all …]

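vgic-debug.c drives its debugfs output from an explicit cursor object (struct vgic_state_iter) that iter_next() advances through the distributor, the per-vCPU private interrupts, and then the shared and LPI ranges. The shape is the classic "state struct plus next()" iterator; a reduced, purely illustrative model of that shape (not the real vgic_state_iter) looks like this:

    #include <linux/types.h>

    /* Hypothetical two-level cursor: walks item 0..nr_items-1 inside each unit. */
    struct cursor {
            int unit, item;
            int nr_units, nr_items;
    };

    static bool cursor_next(struct cursor *c)
    {
            if (++c->item < c->nr_items)
                    return true;
            c->item = 0;
            return ++c->unit < c->nr_units; /* false once every unit is exhausted */
    }
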
vgic-mmio-v3.c
    258  struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;  in vgic_mmio_vcpu_rdist_is_last() local
    273  list_for_each_entry(iter, rd_regions, list) {  in vgic_mmio_vcpu_rdist_is_last()
    274  if (iter->base == end && iter->free_index > 0)  in vgic_mmio_vcpu_rdist_is_last()

/arch/x86/kvm/

mtrr.c
    472  static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)  in mtrr_lookup_fixed_start() argument
    476  if (!fixed_mtrr_is_enabled(iter->mtrr_state))  in mtrr_lookup_fixed_start()
    479  seg = fixed_mtrr_addr_to_seg(iter->start);  in mtrr_lookup_fixed_start()
    483  iter->fixed = true;  in mtrr_lookup_fixed_start()
    484  index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);  in mtrr_lookup_fixed_start()
    485  iter->index = index;  in mtrr_lookup_fixed_start()
    486  iter->seg = seg;  in mtrr_lookup_fixed_start()
    490  static bool match_var_range(struct mtrr_iter *iter,  in match_var_range() argument
    496  if (!(start >= iter->end || end <= iter->start)) {  in match_var_range()
    497  iter->range = range;  in match_var_range()
    [all …]

/arch/sparc/prom/

bootstr_32.c
    19  int iter;  in prom_getbootargs() local
    31  for (iter = 1; iter < 8; iter++) {  in prom_getbootargs()
    32  arg = (*(romvec->pv_v0bootargs))->argv[iter];  in prom_getbootargs()

/arch/x86/kernel/cpu/microcode/

intel.c
    123  struct ucode_patch *iter, *tmp, *p = NULL;  in save_microcode_patch() local
    129  list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {  in save_microcode_patch()
    130  mc_saved_hdr = (struct microcode_header_intel *)iter->data;  in save_microcode_patch()
    144  list_replace(&iter->plist, &p->plist);  in save_microcode_patch()
    145  kfree(iter->data);  in save_microcode_patch()
    146  kfree(iter);  in save_microcode_patch()
    678  struct ucode_patch *iter, *tmp;  in find_patch() local
    680  list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {  in find_patch()
    682  phdr = (struct microcode_header_intel *)iter->data;  in find_patch()
    692  return iter->data;  in find_patch()
    [all …]

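Both intel.c hits use list_for_each_entry_safe(), the list walk that keeps a lookahead pointer (tmp) so the current node can be unlinked and freed without breaking the traversal, which is exactly what save_microcode_patch() needs when it replaces a cached patch. A self-contained sketch of the same idiom on a hypothetical cache (struct patch and the match callback are placeholders, not the real microcode structures):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct patch {
            struct list_head plist;
            void *data;
    };

    static LIST_HEAD(patch_cache);

    /* Remove and free every cached entry the caller's predicate selects. */
    static void drop_matching(bool (*match)(struct patch *))
    {
            struct patch *iter, *tmp;

            list_for_each_entry_safe(iter, tmp, &patch_cache, plist) {
                    if (!match(iter))
                            continue;
                    list_del(&iter->plist);
                    kfree(iter->data);
                    kfree(iter);
            }
    }
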
/arch/um/kernel/

kmsg_dump.c
    13  static struct kmsg_dump_iter iter;  in kmsg_dumper_stdout() local
    39  kmsg_dump_rewind(&iter);  in kmsg_dumper_stdout()
    42  while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {  in kmsg_dumper_stdout()

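The UML dumper shows the consumer side of the kmsg_dump API: rewind the iterator, then drain the log record by record until kmsg_dump_get_line() returns false. A hedged sketch of that loop; emit() is a hypothetical sink standing in for whatever the dumper writes to (stdout in the UML case):

    void emit(const char *line, size_t len);        /* hypothetical output sink */

    static void dump_kernel_log(void)
    {
            struct kmsg_dump_iter iter;
            char line[256];
            size_t len;

            kmsg_dump_rewind(&iter);
            while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
                    emit(line, len);
    }
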
/arch/powerpc/kernel/

cacheinfo.c
    170  struct cache *iter;  in release_cache_debugcheck() local
    172  list_for_each_entry(iter, &cache_list, list)  in release_cache_debugcheck()
    173  WARN_ONCE(iter->next_local == cache,  in release_cache_debugcheck()
    175  iter->ofnode,  in release_cache_debugcheck()
    176  cache_type_string(iter),  in release_cache_debugcheck()
    308  struct cache *iter;  in cache_find_first_sibling() local
    314  list_for_each_entry(iter, &cache_list, list)  in cache_find_first_sibling()
    315  if (iter->ofnode == cache->ofnode &&  in cache_find_first_sibling()
    316  iter->group_id == cache->group_id &&  in cache_find_first_sibling()
    317  iter->next_local == cache)  in cache_find_first_sibling()
    [all …]

nvram_64.c
    650  static struct kmsg_dump_iter iter;  in oops_to_nvram() local
    685  kmsg_dump_rewind(&iter);  in oops_to_nvram()
    686  kmsg_dump_get_buffer(&iter, false,  in oops_to_nvram()
    691  kmsg_dump_rewind(&iter);  in oops_to_nvram()
    692  kmsg_dump_get_buffer(&iter, false,  in oops_to_nvram()

/arch/x86/kernel/

ftrace.c
    197  struct ftrace_rec_iter *iter;  in ftrace_replace_code() local
    202  for_ftrace_rec_iter(iter) {  in ftrace_replace_code()
    203  rec = ftrace_rec_iter_record(iter);  in ftrace_replace_code()
    229  for_ftrace_rec_iter(iter) {  in ftrace_replace_code()
    230  rec = ftrace_rec_iter_record(iter);  in ftrace_replace_code()

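ftrace_replace_code() makes two passes over every patchable call site using the for_ftrace_rec_iter() helper, pulling each record out with ftrace_rec_iter_record(). A hedged sketch of one such pass; inspect_record() is a placeholder for the per-record work (the real code validates on the first pass and patches on the second):

    void inspect_record(struct dyn_ftrace *rec);    /* hypothetical per-record work */

    struct ftrace_rec_iter *iter;
    struct dyn_ftrace *rec;

    /* One pass over all call sites known to dynamic ftrace. */
    for_ftrace_rec_iter(iter) {
            rec = ftrace_rec_iter_record(iter);
            inspect_record(rec);
    }
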
/arch/sh/mm/

asids-debugfs.c
    29  static int asids_debugfs_show(struct seq_file *file, void *iter)  in asids_debugfs_show() argument

pmb.c
    145  struct pmb_entry *pmbe, *iter;  in pmb_mapping_exists() local
    175  for (iter = pmbe->link; iter; iter = iter->link)  in pmb_mapping_exists()
    176  span += iter->size;  in pmb_mapping_exists()
    815  static int pmb_debugfs_show(struct seq_file *file, void *iter)  in pmb_debugfs_show() argument

cache-debugfs.c
    25  static int cache_debugfs_show(struct seq_file *file, void *iter)  in cache_debugfs_show() argument

tlb-debugfs.c
    39  static int tlb_seq_show(struct seq_file *file, void *iter)  in tlb_seq_show() argument

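The sh/mm hits (asids_debugfs_show, pmb_debugfs_show, cache_debugfs_show, tlb_seq_show) are all single-shot seq_file show callbacks whose void *iter argument goes unused; callbacks of this shape are usually exposed through debugfs with the DEFINE_SHOW_ATTRIBUTE() helper. A hedged sketch of that boilerplate (the file name and printed text are made up):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *file, void *iter)
    {
            seq_puts(file, "placeholder contents\n");
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(example);         /* generates example_fops */

    /* In an init path:
     * debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
     */
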
/arch/m68k/emu/

nfblock.c
    65  struct bvec_iter iter;  in nfhd_submit_bio() local
    71  bio_for_each_segment(bvec, bio, iter) {  in nfhd_submit_bio()

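nfhd_submit_bio() walks the bio it was handed with bio_for_each_segment(), which advances a struct bvec_iter and yields one struct bio_vec per contiguous chunk of the request. A hedged sketch of that loop; handle_segment() is a stand-in for the driver's actual per-segment transfer (nfhd_read_write() in the m68k driver):

    #include <linux/bio.h>

    void handle_segment(struct page *page, unsigned int off, unsigned int len);  /* hypothetical */

    static void walk_bio(struct bio *bio)
    {
            struct bio_vec bvec;
            struct bvec_iter iter;

            bio_for_each_segment(bvec, bio, iter) {
                    /* bvec.bv_page, bvec.bv_offset and bvec.bv_len describe the chunk. */
                    handle_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
            }
    }
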
/arch/powerpc/kvm/

book3s_hv_uvmem.c
    363  struct kvmppc_uvmem_slot *p = NULL, *iter;  in kvmppc_next_nontransitioned_gfn() local
    367  list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)  in kvmppc_next_nontransitioned_gfn()
    368  if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {  in kvmppc_next_nontransitioned_gfn()
    369  p = iter;  in kvmppc_next_nontransitioned_gfn()

/arch/xtensa/platforms/iss/

simdisk.c
    107  struct bvec_iter iter;  in simdisk_submit_bio() local
    110  bio_for_each_segment(bvec, bio, iter) {  in simdisk_submit_bio()

/arch/um/drivers/

vector_kern.c
    1051  int iter = 0;  in vector_rx() local
    1054  while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))  in vector_rx()
    1055  iter++;  in vector_rx()
    1057  while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))  in vector_rx()
    1058  iter++;  in vector_rx()
    1061  if (iter == MAX_ITERATIONS)  in vector_rx()

/arch/mips/lib/

memcpy.S
    317  SRL t0, len, LOG_NBYTES+3  # +3 for 8 units/iter
    440  SRL t0, len, LOG_NBYTES+2  # +2 for 4 units/iter

csum_partial.S
    466  SRL t0, len, LOG_NBYTES+3  # +3 for 8 units/iter
    604  SRL t0, len, LOG_NBYTES+2  # +2 for 4 units/iter

/arch/s390/mm/

gmap.c
    127  struct radix_tree_iter iter;  in gmap_radix_tree_free() local
    137  radix_tree_for_each_slot(slot, root, &iter, index) {  in gmap_radix_tree_free()
    138  indices[nr] = iter.index;  in gmap_radix_tree_free()
    152  struct radix_tree_iter iter;  in gmap_rmap_radix_tree_free() local
    162  radix_tree_for_each_slot(slot, root, &iter, index) {  in gmap_rmap_radix_tree_free()
    163  indices[nr] = iter.index;  in gmap_rmap_radix_tree_free()

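Both gmap.c helpers follow the same batch-teardown pattern: radix_tree_for_each_slot() records the occupied indices into a small array, and the entries are then deleted outside the walk. A hedged sketch of that collect-then-delete idiom on a generic radix tree (the caller-supplied indices buffer and its size are placeholders; the real code also frees what it removed):

    #include <linux/radix-tree.h>

    static void prune_batch(struct radix_tree_root *root, unsigned long start,
                            unsigned long *indices, int max)
    {
            struct radix_tree_iter iter;
            void __rcu **slot;
            int nr = 0, i;

            /* Collect up to 'max' occupied indices, starting at 'start'. */
            radix_tree_for_each_slot(slot, root, &iter, start) {
                    indices[nr] = iter.index;
                    if (++nr == max)
                            break;
            }

            /* Delete them after the walk, so the iteration state stays simple. */
            for (i = 0; i < nr; i++)
                    radix_tree_delete(root, indices[i]);
    }
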