/arch/s390/include/asm/

pci_dma.h (hits from line 100):

    static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
    {
        *entry &= ZPCI_PTE_FLAG_MASK;
        *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
    }

    static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
    {
        *entry &= ZPCI_RTE_FLAG_MASK;
        *entry |= (sto & ZPCI_RTE_ADDR_MASK);
        *entry |= ZPCI_TABLE_TYPE_RTX;
    }

    static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
    {
        *entry &= ZPCI_STE_FLAG_MASK;
        *entry |= (pto & ZPCI_STE_ADDR_MASK);
    [all …]

/arch/sparc/include/asm/

spitfire.h (hits from line 119):

    static inline unsigned long spitfire_get_dtlb_data(int entry)
        ...
        : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

    static inline unsigned long spitfire_get_dtlb_tag(int entry)
        ...
        : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));

    static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
        ...
        : "r" (data), "r" (entry << 3),

    static inline unsigned long spitfire_get_itlb_data(int entry)
        ...
        : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

    static inline unsigned long spitfire_get_itlb_tag(int entry)
        ...
        : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
    [all …]

/arch/x86/kvm/

cpuid.c (hits from line 196):

    /* in kvm_get_hypervisor_cpuid() */
    struct kvm_cpuid_entry2 *entry;
    ...
    entry = kvm_find_cpuid_entry(vcpu, base);

    if (entry) {
        ...
        signature[0] = entry->ebx;
        signature[1] = entry->ecx;
        signature[2] = entry->edx;
        ...
        cpuid.limit = entry->eax;

    /* in kvm_cpuid_has_hyperv() */
    struct kvm_cpuid_entry2 *entry;

    entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
    ...
    return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
    [all …]

reverse_cpuid.h (hits from line 166):

    static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
        ...
        return &entry->eax;
        ...
        return &entry->ebx;
        ...
        return &entry->ecx;
        ...
        return &entry->edx;

    static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
        ...
        return __cpuid_entry_get_reg(entry, cpuid.reg);

    static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
        ...
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

    static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
    [all …]

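The reverse_cpuid.h helpers above reduce to picking one of the four output registers of a cached CPUID leaf and then testing a feature bit in it. The stand-alone sketch below shows that pattern; the struct, enum, and function names are illustrative stand-ins for the demo, not KVM's types.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for kvm_cpuid_entry2 and the reverse-CPUID register enum. */
    enum cpuid_reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

    struct cpuid_entry {
        uint32_t eax, ebx, ecx, edx;
    };

    /* Return a pointer to the requested output register, in the spirit of __cpuid_entry_get_reg(). */
    static uint32_t *entry_get_reg(struct cpuid_entry *entry, enum cpuid_reg reg)
    {
        switch (reg) {
        case REG_EAX: return &entry->eax;
        case REG_EBX: return &entry->ebx;
        case REG_ECX: return &entry->ecx;
        case REG_EDX: return &entry->edx;
        }
        return NULL;
    }

    /* Test one feature bit in the selected register, like a simplified cpuid_entry_has(). */
    static bool entry_has(struct cpuid_entry *entry, enum cpuid_reg reg, unsigned int bit)
    {
        return (*entry_get_reg(entry, reg) >> bit) & 1;
    }

    int main(void)
    {
        struct cpuid_entry leaf1 = { .ecx = 1u << 26 };  /* pretend XSAVE (leaf 1, ECX bit 26) is set */

        printf("XSAVE advertised: %d\n", entry_has(&leaf1, REG_ECX, 26));
        return 0;
    }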
/arch/s390/kernel/

jump_label.c (hits from line 19):

    static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
        ...
        insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;

    static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
        ...
        insn->offset = (jump_entry_target(entry) - jump_entry_code(entry)) >> 1;

    static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
        ...
        unsigned char *ipc = (unsigned char *)jump_entry_code(entry);

    static void jump_label_transform(struct jump_entry *entry,
        ...
        void *code = (void *)jump_entry_code(entry);
        ...
        jump_label_make_nop(entry, &old);
        jump_label_make_branch(entry, &new);
    [all …]

/arch/x86/platform/efi/

runtime-map.c (hits from line 25):

    ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
    ...
    static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
        ...
        return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);

    #define EFI_RUNTIME_FIELD(var) entry->md.var
    ...
    static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \

    /* in map_attr_show() */
    struct efi_runtime_map_entry *entry = to_map_entry(kobj);
    ...
    return map_attr->show(entry, buf);

    /* in map_release() */
    struct efi_runtime_map_entry *entry;

    entry = to_map_entry(kobj);
    kfree(entry);
    [all …]

/arch/x86/kernel/

e820.c (hits from line 83):

    /* in _e820__mapped_any() */
    struct e820_entry *entry = &table->entries[i];
    ...
    if (type && entry->type != type)
    ...
    if (entry->addr >= end || entry->addr + entry->size <= start)

    /* in __e820__mapped_all() */
    struct e820_entry *entry = &e820_table->entries[i];
    ...
    if (type && entry->type != type)
    ...
    if (entry->addr >= end || entry->addr + entry->size <= start)
    ...
    if (entry->addr <= start)
        start = entry->addr + entry->size;
    ...
    return entry;

    /* in e820__get_entry_type() */
    struct e820_entry *entry = __e820__mapped_all(start, end, 0);
    [all …]

jump_label.c (hits from line 20):

    int arch_jump_entry_size(struct jump_entry *entry)
        ...
        insn_decode_kernel(&insn, (void *)jump_entry_code(entry));

    __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
        ...
        addr = (void *)jump_entry_code(entry);
        dest = (void *)jump_entry_target(entry);
        ...
        size = arch_jump_entry_size(entry);

    __jump_label_transform(struct jump_entry *entry,
        ...
        const struct jump_label_patch jlp = __jump_label_patch(entry, type);
        ...
        text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
        ...
        text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
    [all …]

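Both e820 lookup helpers above rest on the same half-open interval test: an entry [addr, addr+size) misses the query range [start, end) exactly when entry->addr >= end or entry->addr + entry->size <= start. Below is a small stand-alone sketch of the "mapped any" variant with simplified types; it is not the kernel's implementation. Treating ranges as half-open lets adjacent entries compose without double-counting boundary addresses.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range_entry {
        uint64_t addr;
        uint64_t size;
        int type;
    };

    /* True if any entry of the given type overlaps [start, end), mirroring the shape of _e820__mapped_any(). */
    static bool mapped_any(const struct range_entry *tbl, int n,
                           uint64_t start, uint64_t end, int type)
    {
        for (int i = 0; i < n; i++) {
            const struct range_entry *entry = &tbl[i];

            if (type && entry->type != type)
                continue;
            if (entry->addr >= end || entry->addr + entry->size <= start)
                continue;  /* no overlap with [start, end) */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct range_entry tbl[] = {
            { 0x00000000, 0x0009f000, 1 },  /* usable   */
            { 0x000f0000, 0x00010000, 2 },  /* reserved */
        };

        printf("%d\n", mapped_any(tbl, 2, 0x000f8000, 0x000f9000, 2));  /* 1 */
        printf("%d\n", mapped_any(tbl, 2, 0x00200000, 0x00300000, 1));  /* 0 */
        return 0;
    }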
/arch/riscv/kernel/

perf_callchain.c (hits from line 13):

    static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
        ...
        perf_callchain_store(entry, ra);

    void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
        ...
        perf_callchain_store(entry, regs->epc);
        ...
        fp = user_backtrace(entry, fp, regs->ra);
        while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
            fp = user_backtrace(entry, fp, 0);

    static bool fill_callchain(void *entry, unsigned long pc)
        ...
        return perf_callchain_store(entry, pc) == 0;

    void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
    [all …]

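The user-side callchain walkers (riscv here, csky further down) share one loop: record the sampled PC, then follow saved frame pointers while the pointer stays word-aligned and the callchain has room. The toy below walks an in-memory chain of {fp, ra} records to show that loop; the frame layout and helper names are assumptions made for the demo, not the architecture's real user-stack accessors.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_STACK 8

    /* A fake stack frame record: saved frame pointer and return address. */
    struct frame {
        uintptr_t fp;  /* previous frame, 0 terminates the chain */
        uintptr_t ra;  /* return address recorded for this frame  */
    };

    struct callchain {
        unsigned int nr;
        uintptr_t ip[MAX_STACK];
    };

    static void callchain_store(struct callchain *entry, uintptr_t ip)
    {
        if (entry->nr < MAX_STACK)
            entry->ip[entry->nr++] = ip;
    }

    /* Record one frame's return address and hand back the next frame pointer. */
    static uintptr_t walk_one(struct callchain *entry, uintptr_t fp)
    {
        const struct frame *f = (const struct frame *)fp;

        callchain_store(entry, f->ra);
        return f->fp;
    }

    int main(void)
    {
        struct frame f2 = { 0,              0x400300 };
        struct frame f1 = { (uintptr_t)&f2, 0x400200 };
        struct callchain chain = { 0 };
        uintptr_t fp = (uintptr_t)&f1;

        callchain_store(&chain, 0x400100);  /* the sampled PC itself */
        while (fp && !(fp & 0x3) && chain.nr < MAX_STACK)
            fp = walk_one(&chain, fp);

        for (unsigned int i = 0; i < chain.nr; i++)
            printf("0x%lx\n", (unsigned long)chain.ip[i]);
        return 0;
    }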
/arch/um/kernel/

irq.c (hits from line 94):

    static bool irq_do_timetravel_handler(struct irq_entry *entry,
        ...
        struct irq_reg *reg = &entry->reg[t];
        ...
        reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

    static bool irq_do_timetravel_handler(struct irq_entry *entry,
    ...

    static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
        ...
        struct irq_reg *reg = &entry->reg[t];
        ...
        if (irq_do_timetravel_handler(entry, t))

    static bool update_irq_entry(struct irq_entry *entry)
        ...
        events |= entry->reg[i].events;
        ...
        os_add_epoll_fd(events, entry->fd, entry);
    [all …]

/arch/x86/boot/compressed/

idt_64.c (hits from line 10):

    /* in set_idt_entry() */
    gate_desc entry;

    memset(&entry, 0, sizeof(entry));

    entry.offset_low = (u16)(address & 0xffff);
    entry.segment = __KERNEL_CS;
    entry.bits.type = GATE_TRAP;
    entry.bits.p = 1;
    entry.offset_middle = (u16)((address >> 16) & 0xffff);
    entry.offset_high = (u32)(address >> 32);

    memcpy(&boot_idt[vector], &entry, sizeof(entry));

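set_idt_entry() above scatters the 64-bit handler address across the three offset fields of an IDT gate. A quick stand-alone check of that split and reassembly, using only the offset fields rather than a full gate descriptor:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified gate: only the three offset pieces a 64-bit IDT entry carries. */
    struct toy_gate {
        uint16_t offset_low;     /* address bits  0..15 */
        uint16_t offset_middle;  /* address bits 16..31 */
        uint32_t offset_high;    /* address bits 32..63 */
    };

    static struct toy_gate split(uint64_t address)
    {
        struct toy_gate g = {
            .offset_low    = (uint16_t)(address & 0xffff),
            .offset_middle = (uint16_t)((address >> 16) & 0xffff),
            .offset_high   = (uint32_t)(address >> 32),
        };
        return g;
    }

    static uint64_t join(struct toy_gate g)
    {
        return (uint64_t)g.offset_high << 32 |
               (uint64_t)g.offset_middle << 16 |
               g.offset_low;
    }

    int main(void)
    {
        uint64_t handler = 0xffffffff81234567ull;

        assert(join(split(handler)) == handler);
        printf("round-trip ok\n");
        return 0;
    }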
/arch/mips/kernel/

perf_event.c (hits from line 26):

    static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
        ...
        perf_callchain_store(entry, addr);
        if (entry->nr >= entry->max_stack)
        ...

    void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
        ...
        save_raw_perf_callchain(entry, sp);
        ...
        perf_callchain_store(entry, pc);
        if (entry->nr >= entry->max_stack)
        ...
        save_raw_perf_callchain(entry, sp);

/arch/arm/kernel/

perf_callchain.c (hits from line 35):

    /* in user_backtrace() */
    struct perf_callchain_entry_ctx *entry)
    ...
    perf_callchain_store(entry, buftail.lr);

    perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
        ...
        perf_callchain_store(entry, regs->ARM_pc);
        ...
        while ((entry->nr < entry->max_stack) &&
            ...
            tail = user_backtrace(tail, entry);

    /* in callchain_trace() */
    struct perf_callchain_entry_ctx *entry = data;
    return perf_callchain_store(entry, pc) == 0;

    perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
        ...
        walk_stackframe(&fr, callchain_trace, entry);

jump_label.c (hits from line 7):

    static void __arch_jump_label_transform(struct jump_entry *entry,
        ...
        void *addr = (void *)entry->code;
        ...
        insn = arm_gen_branch(entry->code, entry->target);

    void arch_jump_label_transform(struct jump_entry *entry,
        ...
        __arch_jump_label_transform(entry, type, false);

/arch/csky/kernel/

perf_callchain.c (hits from line 36):

    /* in walk_stackframe() */
    struct perf_callchain_entry_ctx *entry)
    ...
    perf_callchain_store(entry, fr->lr);

    static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
        ...
        perf_callchain_store(entry, lr);

    void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
        ...
        perf_callchain_store(entry, regs->pc);
        ...
        fp = user_backtrace(entry, fp, regs->lr);
        ...
        while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
            fp = user_backtrace(entry, fp, 0);

    void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
    [all …]

/arch/sh/mm/

tlbex_32.c (hits from line 30):

    /* in handle_tlbmiss() */
    pte_t entry;
    ...
    entry = *pte;
    if (unlikely(pte_none(entry) || pte_not_present(entry)))
    ...
    if (unlikely(error_code && !pte_write(entry)))
    ...
    entry = pte_mkdirty(entry);
    entry = pte_mkyoung(entry);
    ...
    set_pte(pte, entry);

/arch/sparc/mm/

hugetlbpage.c (hits from line 134):

    static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
    {
        return entry;
    }

    static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
        ...
        pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
        ...
        pte_val(entry) |= _PAGE_PUD_HUGE;
        ...
        pte_val(entry) |= _PAGE_PMD_HUGE;
        ...
        pte_val(entry) |= _PAGE_PMD_HUGE;
        ...
        pte_val(entry) |= _PAGE_PMD_HUGE;
        ...
        pte_val(entry) = pte_val(entry) | hugepage_size;
        return entry;
    [all …]

/arch/mips/mm/

pgtable-64.c (hits from line 19):

    /* in pgd_init() */
    unsigned long entry;
    ...
    entry = (unsigned long)invalid_pud_table;
    ...
    entry = (unsigned long)invalid_pmd_table;
    ...
    entry = (unsigned long)invalid_pte_table;
    ...
    p[0] = entry;
    p[1] = entry;
    p[2] = entry;
    p[3] = entry;
    p[4] = entry;
    ...
    p[-3] = entry;
    [all …]

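pgd_init() here (and the near-identical loongarch version further down) fills an entire directory page with the address of a shared "invalid" next-level table, unrolling the stores by eight. A plain C sketch of that fill follows, with a made-up table size and a stand-in for the invalid table; it only illustrates the pattern.

    #include <stdio.h>

    #define PTRS_PER_PGD 16                 /* made-up size for the demo, must be a multiple of 8 */
    static unsigned long invalid_table[1];  /* stand-in for invalid_pud/pmd/pte_table             */

    /* Point every slot of the directory at the shared invalid lower-level table, 8 at a time. */
    static void pgd_init(unsigned long *page)
    {
        unsigned long entry = (unsigned long)invalid_table;
        unsigned long *p = page;
        unsigned long *end = page + PTRS_PER_PGD;

        do {
            p[0] = entry;
            p[1] = entry;
            p[2] = entry;
            p[3] = entry;
            p[4] = entry;
            p += 8;
            p[-3] = entry;
            p[-2] = entry;
            p[-1] = entry;
        } while (p != end);
    }

    int main(void)
    {
        unsigned long pgd[PTRS_PER_PGD];

        pgd_init(pgd);
        printf("pgd[0] == pgd[%d]: %d\n", PTRS_PER_PGD - 1,
               pgd[0] == pgd[PTRS_PER_PGD - 1]);
        return 0;
    }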
/arch/arm64/mm/

mteswap.c (hits from line 50):

    void mte_restore_tags(swp_entry_t entry, struct page *page)
    {
        void *tags = xa_load(&mte_pages, entry.val);
        ...

    /* in mte_invalidate_tags() */
    swp_entry_t entry = swp_entry(type, offset);
    void *tags = xa_erase(&mte_pages, entry.val);

    /* in __mte_invalidate_tags() */
    swp_entry_t entry = page_swap_entry(page);
    ...
    mte_invalidate_tags(swp_type(entry), swp_offset(entry));

    /* in mte_invalidate_tags_area() */
    swp_entry_t entry = swp_entry(type, 0);
    ...
    XA_STATE(xa_state, &mte_pages, entry.val);

    void arch_swap_restore(swp_entry_t entry, struct folio *folio)
        ...
        mte_restore_tags(entry, folio_page(folio, i));
    [all …]

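mteswap.c keeps saved tag blobs in an xarray indexed by the swap entry's value, so restoring is a lookup and invalidating is an erase. The rough analogue below uses a tiny fixed-size table in place of an xarray; the names, sizes, and collision-free assumption are inventions for the demo.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SLOTS 64

    /* Toy "xarray": saved tag blobs indexed by swap-entry value modulo SLOTS. */
    static void *tag_store[SLOTS];

    static void save_tags(unsigned long swp_val, const void *tags, size_t len)
    {
        void *copy = malloc(len);

        memcpy(copy, tags, len);
        free(tag_store[swp_val % SLOTS]);
        tag_store[swp_val % SLOTS] = copy;
    }

    /* Like mte_restore_tags(): look the blob up and, if present, apply it. */
    static int restore_tags(unsigned long swp_val, void *dst, size_t len)
    {
        void *tags = tag_store[swp_val % SLOTS];

        if (!tags)
            return 0;
        memcpy(dst, tags, len);
        return 1;
    }

    /* Like mte_invalidate_tags(): drop whatever was stashed for this entry. */
    static void invalidate_tags(unsigned long swp_val)
    {
        free(tag_store[swp_val % SLOTS]);
        tag_store[swp_val % SLOTS] = NULL;
    }

    int main(void)
    {
        char tags[4] = { 1, 2, 3, 4 }, out[4] = { 0 };

        save_tags(42, tags, sizeof(tags));
        printf("restored: %d, first tag %d\n", restore_tags(42, out, sizeof(out)), out[0]);
        invalidate_tags(42);
        printf("after invalidate: %d\n", restore_tags(42, out, sizeof(out)));
        return 0;
    }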
/arch/parisc/kernel/

alternative.c (hits from line 26):

    /* in apply_alternatives() */
    struct alt_instr *entry;
    ...
    for (entry = start; entry < end; entry++, index++) {
        ...
        from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
        len = entry->len;
        cond = entry->cond;
        replacement = entry->replacement;
        ...
        source = (u32 *)((ulong)&entry->replacement + entry->replacement);

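The orig_offset and replacement fields read in apply_alternatives() are stored relative to their own location, which keeps the alternatives table position-independent; adding the field's address to its value recovers the real pointer. A compact stand-alone illustration of that idiom (toy struct, not the parisc alt_instr layout); self-relative offsets avoid load-time relocations, which is a common reason such patch tables use them.

    #include <stdint.h>
    #include <stdio.h>

    /* A patch-site record whose target is stored self-relatively. */
    struct alt_entry {
        int32_t orig_offset;  /* target address minus the address of this field */
    };

    static uint32_t patch_me = 0xdeadbeef;
    static struct alt_entry e;  /* kept global so it sits near patch_me in the demo */

    static uint32_t *resolve(struct alt_entry *entry)
    {
        /* Same shape as: from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); */
        return (uint32_t *)((uintptr_t)&entry->orig_offset + entry->orig_offset);
    }

    int main(void)
    {
        /* A build tool (or the linker) would normally emit this difference. */
        e.orig_offset = (int32_t)((intptr_t)&patch_me - (intptr_t)&e.orig_offset);

        printf("resolved target holds 0x%x\n", *resolve(&e));  /* 0xdeadbeef */
        return 0;
    }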
/arch/loongarch/mm/

pgtable.c (hits from line 44):

    /* in pgd_init() */
    unsigned long entry;
    ...
    entry = (unsigned long)invalid_pud_table;
    ...
    entry = (unsigned long)invalid_pmd_table;
    ...
    entry = (unsigned long)invalid_pte_table;
    ...
    p[0] = entry;
    p[1] = entry;
    p[2] = entry;
    p[3] = entry;
    p[4] = entry;
    ...
    p[-3] = entry;
    [all …]

/arch/m68k/include/asm/

sun3mmu.h (hits from line 88):

    /* in sun3_get_segmap() */
    register unsigned long entry;
    ...
    entry = c;
    ...
    return entry;

    static inline void sun3_put_segmap(unsigned long addr, unsigned long entry)
        ...
        SET_CONTROL_BYTE (AC_SEGMAP | (addr & SUN3_CONTROL_MASK), entry);

    /* in sun3_get_pte() */
    register unsigned long entry;
    ...
    GET_CONTROL_WORD (AC_PAGEMAP | (addr & SUN3_CONTROL_MASK), entry);
    ...
    return entry;

    static inline void sun3_put_pte(unsigned long addr, unsigned long entry)
        ...
        SET_CONTROL_WORD (AC_PAGEMAP | (addr & SUN3_CONTROL_MASK), entry);

/arch/powerpc/kernel/

systbl.c (hits from line 19):

    #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
    ...
    #define __SYSCALL(nr, entry) [nr] = entry,
    ...
    #define __SYSCALL(nr, entry) [nr] = (void *) entry,

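systbl.c builds its syscall tables by redefining __SYSCALL before expanding a generated list, so one list of numbers and entry points can produce several differently-typed tables. The self-contained imitation below uses a hand-written two-entry list in place of the generated header; the names are illustrative only.

    #include <stdio.h>

    /* Stand-ins for syscall entry points. */
    static long sys_read(void)  { return 0; }
    static long sys_write(void) { return 1; }

    /* The "generated" list: one line per syscall, expanded through __SYSCALL. */
    #define SYSCALL_LIST \
        __SYSCALL(0, sys_read) \
        __SYSCALL(1, sys_write)

    /* First expansion: a table of function pointers, in the spirit of sys_call_table[]. */
    #define __SYSCALL(nr, entry) [nr] = entry,
    static long (*const call_table[])(void) = {
        SYSCALL_LIST
    };
    #undef __SYSCALL

    /* Second expansion of the same list: a table of names, just to show the reuse. */
    #define __SYSCALL(nr, entry) [nr] = #entry,
    static const char *const name_table[] = {
        SYSCALL_LIST
    };
    #undef __SYSCALL

    int main(void)
    {
        printf("%s -> %ld\n", name_table[1], call_table[1]());
        return 0;
    }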
/arch/x86/kernel/cpu/resctrl/

monitor.c (hits from line 141):

    /* in __rmid_entry() */
    struct rmid_entry *entry;

    entry = &rmid_ptrs[rmid];
    WARN_ON(entry->rmid != rmid);

    return entry;

    /* in __check_limbo() */
    struct rmid_entry *entry;
    ...
    entry = __rmid_entry(nrmid);
    ...
    if (resctrl_arch_rmid_read(r, d, entry->rmid,
    ...
    clear_bit(entry->rmid, d->rmid_busy_llc);
    if (!--entry->busy) {
        ...
        list_add_tail(&entry->list, &rmid_free_lru);
    [all …]

/arch/x86/kernel/apic/

io_apic.c (hits from line 80):

    #define for_each_irq_pin(entry, head) \
        list_for_each_entry(entry, &head, list)
    ...
    struct IO_APIC_route_entry entry;  /* struct member */

    /* in __ioapic_read_entry() */
    struct IO_APIC_route_entry entry;

    entry.w1 = io_apic_read(apic, 0x10 + 2 * pin);
    entry.w2 = io_apic_read(apic, 0x11 + 2 * pin);

    return entry;

    /* in ioapic_read_entry() */
    struct IO_APIC_route_entry entry;
    ...
    entry = __ioapic_read_entry(apic, pin);
    ...
    return entry;
    [all …]

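__ioapic_read_entry() reads one 64-bit routing entry as two 32-bit words at indexed registers 0x10 + 2*pin and 0x11 + 2*pin. The simulation below stands an ordinary array in for the IO-APIC register window to show that access pattern; it is only a sketch, not the real io_apic_read() path.

    #include <stdint.h>
    #include <stdio.h>

    /* Fake IO-APIC register file: indexed 32-bit registers. */
    static uint32_t fake_regs[0x40];

    static uint32_t reg_read(unsigned int index)
    {
        return fake_regs[index];
    }

    struct route_entry {
        uint32_t w1;
        uint32_t w2;
    };

    /* Same access pattern as __ioapic_read_entry(): two adjacent registers per pin. */
    static struct route_entry read_entry(unsigned int pin)
    {
        struct route_entry entry;

        entry.w1 = reg_read(0x10 + 2 * pin);
        entry.w2 = reg_read(0x11 + 2 * pin);
        return entry;
    }

    int main(void)
    {
        fake_regs[0x10 + 2 * 3] = 0x000000a1;  /* pin 3, low word: vector */
        fake_regs[0x11 + 2 * 3] = 0x01000000;  /* pin 3, high word: destination */

        struct route_entry e = read_entry(3);

        printf("pin 3: w1=0x%08x w2=0x%08x\n", e.w1, e.w2);
        return 0;
    }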