/arch/powerpc/kvm/

booke_emulate.c
    37   vcpu->arch.pc = vcpu->arch.shared->srr0;  in kvmppc_emul_rfi()
    38   kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);  in kvmppc_emul_rfi()
    43   vcpu->arch.pc = vcpu->arch.dsrr0;  in kvmppc_emul_rfdi()
    44   kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);  in kvmppc_emul_rfdi()
    49   vcpu->arch.pc = vcpu->arch.csrr0;  in kvmppc_emul_rfci()
    50   kvmppc_set_msr(vcpu, vcpu->arch.csrr1);  in kvmppc_emul_rfci()
    91   kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);  in kvmppc_booke_emulate_op()
    101  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
    107  vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)  in kvmppc_booke_emulate_op()
    138  vcpu->arch.shared->dar = spr_val;  in kvmppc_booke_emulate_mtspr()
    [all …]
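
The booke_emulate.c matches are the Book E return-from-interrupt family: each variant (rfi, rfci, rfdi) restores the guest program counter and MSR from its own pair of save/restore registers. A minimal sketch of that pattern with hypothetical stand-in types (the real kvmppc_set_msr also resynchronizes shadow state):

    /* Hypothetical, simplified vcpu state; the kernel spreads these fields
     * across vcpu->arch and vcpu->arch.shared. */
    struct booke_regs {
            unsigned long pc, msr;
            unsigned long srr0, srr1;     /* normal interrupts   */
            unsigned long csrr0, csrr1;   /* critical interrupts */
            unsigned long dsrr0, dsrr1;   /* debug interrupts    */
    };

    /* Each rfi variant returns to the PC/MSR saved by its interrupt class. */
    static void emul_rfi(struct booke_regs *r)  { r->pc = r->srr0;  r->msr = r->srr1;  }
    static void emul_rfci(struct booke_regs *r) { r->pc = r->csrr0; r->msr = r->csrr1; }
    static void emul_rfdi(struct booke_regs *r) { r->pc = r->dsrr0; r->msr = r->dsrr1; }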

timing.c
    38   mutex_lock(&vcpu->arch.exit_timing_lock);  in kvmppc_init_timing_stats()
    40   vcpu->arch.last_exit_type = 0xDEAD;  in kvmppc_init_timing_stats()
    42   vcpu->arch.timing_count_type[i] = 0;  in kvmppc_init_timing_stats()
    43   vcpu->arch.timing_max_duration[i] = 0;  in kvmppc_init_timing_stats()
    44   vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;  in kvmppc_init_timing_stats()
    45   vcpu->arch.timing_sum_duration[i] = 0;  in kvmppc_init_timing_stats()
    46   vcpu->arch.timing_sum_quad_duration[i] = 0;  in kvmppc_init_timing_stats()
    48   vcpu->arch.timing_last_exit = 0;  in kvmppc_init_timing_stats()
    49   vcpu->arch.timing_exit.tv64 = 0;  in kvmppc_init_timing_stats()
    50   vcpu->arch.timing_last_enter.tv64 = 0;  in kvmppc_init_timing_stats()
    [all …]
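
kvmppc_init_timing_stats() resets, per exit type, a count, min, max, sum, and sum of squares (sum_quad). Keeping the sum and the sum of squares is enough to derive mean and variance later without storing individual samples; a sketch of the accumulation side, with illustrative names:

    #include <stdint.h>

    struct exit_timing {
            uint32_t count;
            uint64_t min, max;      /* min starts at 0xFFFFFFFF, as in the reset above */
            uint64_t sum;           /* sum of durations */
            uint64_t sum_quad;      /* sum of squared durations */
    };

    static void record_exit(struct exit_timing *t, uint64_t duration)
    {
            t->count++;
            if (duration < t->min) t->min = duration;
            if (duration > t->max) t->max = duration;
            t->sum += duration;
            t->sum_quad += duration * duration;
            /* later: mean = sum / count, variance = sum_quad / count - mean * mean */
    }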

book3s_hv.c
    100  int real_cpu = cpu + vcpu->arch.ptid;  in kvmppc_fast_vcpu_kick_hv()
    147  struct kvmppc_vcore *vc = vcpu->arch.vcore;  in kvmppc_core_vcpu_load_hv()
    150  spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);  in kvmppc_core_vcpu_load_hv()
    156  if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&  in kvmppc_core_vcpu_load_hv()
    157  vcpu->arch.busy_preempt != TB_NIL) {  in kvmppc_core_vcpu_load_hv()
    158  vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;  in kvmppc_core_vcpu_load_hv()
    159  vcpu->arch.busy_preempt = TB_NIL;  in kvmppc_core_vcpu_load_hv()
    161  spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);  in kvmppc_core_vcpu_load_hv()
    166  struct kvmppc_vcore *vc = vcpu->arch.vcore;  in kvmppc_core_vcpu_put_hv()
    169  spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);  in kvmppc_core_vcpu_put_hv()
    [all …]
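
The book3s_hv.c hits are HV KVM's stolen-time bookkeeping: on vcpu load, if the vcpu had been preempted while busy in the host (busy_preempt holds the timebase value at preemption, TB_NIL otherwise), the elapsed ticks are credited to busy_stolen under tbacct_lock. A rough sketch with the locking elided:

    #include <stdint.h>

    #define TB_NIL (~(uint64_t)0)   /* sentinel: no preemption in progress */

    struct tb_acct {
            int      busy_in_host;  /* stand-in for state == KVMPPC_VCPU_BUSY_IN_HOST */
            uint64_t busy_preempt;  /* timebase when preempted, or TB_NIL */
            uint64_t busy_stolen;   /* accumulated stolen timebase ticks  */
    };

    /* Called on vcpu load; 'now' would come from mftb(). The kernel holds
     * arch.tbacct_lock around this update. */
    static void account_stolen_on_load(struct tb_acct *a, uint64_t now)
    {
            if (a->busy_in_host && a->busy_preempt != TB_NIL) {
                    a->busy_stolen += now - a->busy_preempt;
                    a->busy_preempt = TB_NIL;
            }
    }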

booke.c
    77   printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);  in kvmppc_dump_vcpu()
    78   printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);  in kvmppc_dump_vcpu()
    79   printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,  in kvmppc_dump_vcpu()
    80   vcpu->arch.shared->srr1);  in kvmppc_dump_vcpu()
    82   printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);  in kvmppc_dump_vcpu()
    99   vcpu->arch.shadow_msr &= ~MSR_SPE;  in kvmppc_vcpu_disable_spe()
    108  vcpu->arch.shadow_msr |= MSR_SPE;  in kvmppc_vcpu_enable_spe()
    114  if (vcpu->arch.shared->msr & MSR_SPE) {  in kvmppc_vcpu_sync_spe()
    115  if (!(vcpu->arch.shadow_msr & MSR_SPE))  in kvmppc_vcpu_sync_spe()
    117  } else if (vcpu->arch.shadow_msr & MSR_SPE) {  in kvmppc_vcpu_sync_spe()
    [all …]
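
kvmppc_vcpu_sync_spe() keeps the SPE bit of the hardware-visible shadow MSR in step with the guest's MSR, so SPE unit state is only live while the guest has it enabled. The shape of that synchronization, sketched with the enable/disable helpers reduced to bit flips (MSR_SPE's value here is illustrative):

    #define MSR_SPE (1u << 25)      /* illustrative bit position */

    struct msr_pair {
            unsigned long guest_msr;   /* what the guest thinks MSR is         */
            unsigned long shadow_msr;  /* what the hardware actually runs with */
    };

    static void sync_spe(struct msr_pair *m)
    {
            if (m->guest_msr & MSR_SPE) {
                    if (!(m->shadow_msr & MSR_SPE))
                            m->shadow_msr |= MSR_SPE;   /* kernel also loads SPE regs here */
            } else if (m->shadow_msr & MSR_SPE) {
                    m->shadow_msr &= ~MSR_SPE;          /* kernel also saves SPE regs here */
            }
    }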

e500_emulate.c
    54   ulong param = vcpu->arch.gpr[rb];  in kvmppc_e500_emul_msgclr()
    60   clear_bit(prio, &vcpu->arch.pending_exceptions);  in kvmppc_e500_emul_msgclr()
    66   ulong param = vcpu->arch.gpr[rb];  in kvmppc_e500_emul_msgsnd()
    76   int cpir = cvcpu->arch.shared->pir;  in kvmppc_e500_emul_msgsnd()
    78   set_bit(prio, &cvcpu->arch.pending_exceptions);  in kvmppc_e500_emul_msgsnd()
    95   run->debug.arch.address = vcpu->arch.pc;  in kvmppc_e500_emul_ehpriv()
    96   run->debug.arch.status = 0;  in kvmppc_e500_emul_ehpriv()
    210  vcpu->arch.shared->mas0 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
    213  vcpu->arch.shared->mas1 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
    216  vcpu->arch.shared->mas2 = spr_val;  in kvmppc_core_emulate_mtspr_e500()
    [all …]
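
The e500 msgsnd/msgclr emulation treats doorbells as bits in each vcpu's pending_exceptions word: msgclr clears the priority bit on the local vcpu, while msgsnd sets it on every vcpu whose PIR matches the message's target. A condensed, self-contained sketch of that flow (types and the matching rule are illustrative):

    struct dbell_vcpu { int pir; unsigned long pending; };

    static void emul_msgclr(struct dbell_vcpu *v, int prio)
    {
            v->pending &= ~(1UL << prio);
    }

    /* msgsnd targets by PIR: every matching vcpu gets the doorbell queued
     * (the kernel then kicks the vcpu so the exception is delivered). */
    static void emul_msgsnd(struct dbell_vcpu *vcpus, int n, int target_pir, int prio)
    {
            for (int i = 0; i < n; i++)
                    if (vcpus[i].pir == target_pir)
                            vcpus[i].pending |= 1UL << prio;
    }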

e500mc.c
    106  vcpu->arch.pid = pid;  in kvmppc_set_pid()
    123  mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);  in kvmppc_core_vcpu_load_e500mc()
    125  mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);  in kvmppc_core_vcpu_load_e500mc()
    126  vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);  in kvmppc_core_vcpu_load_e500mc()
    127  vcpu->arch.epsc = vcpu->arch.eplc;  in kvmppc_core_vcpu_load_e500mc()
    128  mtspr(SPRN_EPLC, vcpu->arch.eplc);  in kvmppc_core_vcpu_load_e500mc()
    129  mtspr(SPRN_EPSC, vcpu->arch.epsc);  in kvmppc_core_vcpu_load_e500mc()
    131  mtspr(SPRN_GIVPR, vcpu->arch.ivpr);  in kvmppc_core_vcpu_load_e500mc()
    132  mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);  in kvmppc_core_vcpu_load_e500mc()
    133  mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);  in kvmppc_core_vcpu_load_e500mc()
    [all …]

book3s_pr.c
    81   if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)  in kvmppc_fixup_split_real()
    88   vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;  in kvmppc_fixup_split_real()
    111  current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;  in kvmppc_core_vcpu_load_pr()
    148  svcpu->gpr[0] = vcpu->arch.gpr[0];  in kvmppc_copy_to_svcpu()
    149  svcpu->gpr[1] = vcpu->arch.gpr[1];  in kvmppc_copy_to_svcpu()
    150  svcpu->gpr[2] = vcpu->arch.gpr[2];  in kvmppc_copy_to_svcpu()
    151  svcpu->gpr[3] = vcpu->arch.gpr[3];  in kvmppc_copy_to_svcpu()
    152  svcpu->gpr[4] = vcpu->arch.gpr[4];  in kvmppc_copy_to_svcpu()
    153  svcpu->gpr[5] = vcpu->arch.gpr[5];  in kvmppc_copy_to_svcpu()
    154  svcpu->gpr[6] = vcpu->arch.gpr[6];  in kvmppc_copy_to_svcpu()
    [all …]

e500_mmu.c
    74   esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);  in get_tlb_esel()
    137  tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;  in kvmppc_e500_deliver_tlb_miss()
    139  tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;  in kvmppc_e500_deliver_tlb_miss()
    141  vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)  in kvmppc_e500_deliver_tlb_miss()
    143  vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)  in kvmppc_e500_deliver_tlb_miss()
    146  vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)  in kvmppc_e500_deliver_tlb_miss()
    147  | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);  in kvmppc_e500_deliver_tlb_miss()
    148  vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;  in kvmppc_e500_deliver_tlb_miss()
    149  vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)  in kvmppc_e500_deliver_tlb_miss()
    335  vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);  in kvmppc_e500_emul_tlbre()
    [all …]

/arch/mips/kvm/

emulate.c
    47   struct kvm_vcpu_arch *arch = &vcpu->arch;  in kvm_compute_return_epc() local
    65   arch->gprs[insn.r_format.rd] = epc + 8;  in kvm_compute_return_epc()
    68   nextpc = arch->gprs[insn.r_format.rs];  in kvm_compute_return_epc()
    82   if ((long)arch->gprs[insn.i_format.rs] < 0)  in kvm_compute_return_epc()
    91   if ((long)arch->gprs[insn.i_format.rs] >= 0)  in kvm_compute_return_epc()
    100  arch->gprs[31] = epc + 8;  in kvm_compute_return_epc()
    101  if ((long)arch->gprs[insn.i_format.rs] < 0)  in kvm_compute_return_epc()
    110  arch->gprs[31] = epc + 8;  in kvm_compute_return_epc()
    111  if ((long)arch->gprs[insn.i_format.rs] >= 0)  in kvm_compute_return_epc()
    134  arch->gprs[31] = instpc + 8;  in kvm_compute_return_epc()
    [all …]
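
kvm_compute_return_epc() emulates MIPS branches in software. The recurring epc + 8 is the delay-slot rule: a linking branch writes the address after both the branch and its delay slot into $31 (or rd for jalr). A hedged sketch of one case, the bltzal-style "branch and link if rs < 0":

    /* Sketch of one branch flavor; the target arithmetic follows the MIPS
     * rule  target = epc + 4 + (sign-extended offset << 2). */
    static unsigned long branch_link_ltz(unsigned long epc, long rs_value,
                                         short offset, unsigned long *gpr31)
    {
            *gpr31 = epc + 8;                            /* return past the delay slot */
            if (rs_value < 0)
                    return epc + 4 + ((long)offset << 2);    /* branch taken */
            return epc + 8;                              /* not taken: fall past delay slot */
    }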

interrupt.c
    27   set_bit(priority, &vcpu->arch.pending_exceptions);  in kvm_mips_queue_irq()
    32   clear_bit(priority, &vcpu->arch.pending_exceptions);  in kvm_mips_dequeue_irq()
    42   kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));  in kvm_mips_queue_timer_int_cb()
    51   kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));  in kvm_mips_dequeue_timer_int_cb()
    67   kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));  in kvm_mips_queue_io_int_cb()
    73   kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));  in kvm_mips_queue_io_int_cb()
    78   kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));  in kvm_mips_queue_io_int_cb()
    95   kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));  in kvm_mips_dequeue_io_int_cb()
    100  kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));  in kvm_mips_dequeue_io_int_cb()
    105  kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));  in kvm_mips_dequeue_io_int_cb()
    [all …]
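
The MIPS interrupt path is two bookkeeping steps per event: set or clear the priority in pending_exceptions, and mirror it in the guest CP0 Cause register's IP bits (plus C_TI for the timer). A minimal sketch of the pair, with simplified types and illustrative bit positions:

    struct guest_irq_state {
            unsigned long pending;   /* bitmap of queued exception priorities */
            unsigned int  cause;     /* guest CP0_Cause image                 */
    };

    #define C_IRQ5 (1u << 15)        /* illustrative IP7 position         */
    #define C_TI   (1u << 30)        /* illustrative timer-interrupt flag */

    static void queue_timer_int(struct guest_irq_state *s, int priority)
    {
            s->pending |= 1UL << priority;
            s->cause   |= C_IRQ5 | C_TI;
    }

    static void dequeue_timer_int(struct guest_irq_state *s, int priority)
    {
            s->pending &= ~(1UL << priority);
            s->cause   &= ~(C_IRQ5 | C_TI);
    }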

/arch/s390/kvm/

guestdbg.c
    65   u64 *cr9 = &vcpu->arch.sie_block->gcr[9];  in enable_all_hw_bp()
    66   u64 *cr10 = &vcpu->arch.sie_block->gcr[10];  in enable_all_hw_bp()
    67   u64 *cr11 = &vcpu->arch.sie_block->gcr[11];  in enable_all_hw_bp()
    70   if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||  in enable_all_hw_bp()
    71   vcpu->arch.guestdbg.hw_bp_info == NULL)  in enable_all_hw_bp()
    82   for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {  in enable_all_hw_bp()
    83   start = vcpu->arch.guestdbg.hw_bp_info[i].addr;  in enable_all_hw_bp()
    84   len = vcpu->arch.guestdbg.hw_bp_info[i].len;  in enable_all_hw_bp()
    105  u64 *cr9 = &vcpu->arch.sie_block->gcr[9];  in enable_all_hw_wp()
    106  u64 *cr10 = &vcpu->arch.sie_block->gcr[10];  in enable_all_hw_wp()
    [all …]
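
On s390, PER hardware breakpoints are programmed through guest control registers: cr9 selects the PER events, and cr10/cr11 bound a single address range. Since there is only one range, covering several breakpoints means widening it to span them all; a sketch of that idea (the kernel's exact policy for multiple breakpoints may differ):

    #include <stdint.h>

    struct hw_bp { uint64_t addr, len; };

    /* One PER range must cover all breakpoints: take the lowest start and
     * the highest end across the list. */
    static void program_bp_range(const struct hw_bp *bp, int n,
                                 uint64_t *cr10, uint64_t *cr11)
    {
            if (n <= 0 || bp == NULL)
                    return;
            *cr10 = UINT64_MAX;   /* range start */
            *cr11 = 0;            /* range end   */
            for (int i = 0; i < n; i++) {
                    if (bp[i].addr < *cr10)
                            *cr10 = bp[i].addr;
                    if (bp[i].addr + bp[i].len > *cr11)
                            *cr11 = bp[i].addr + bp[i].len;
            }
    }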

kvm-s390.c
    184  struct gmap *gmap = kvm->arch.gmap;  in kvm_s390_sync_dirty_log()
    249  kvm->arch.use_irqchip = 1;  in kvm_vm_ioctl_enable_cap()
    268  kvm->arch.use_cmma = 1;  in kvm_s390_mem_control()
    276  page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);  in kvm_s390_mem_control()
    363  if (kvm->arch.use_irqchip) {  in kvm_arch_vm_ioctl()
    404  kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),  in kvm_s390_crypto_init()
    406  if (!kvm->arch.crypto.crycb)  in kvm_s390_crypto_init()
    409  kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |  in kvm_s390_crypto_init()
    438  kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);  in kvm_arch_init_vm()
    439  if (!kvm->arch.sca)  in kvm_arch_init_vm()
    [all …]

intercept.c
    43   switch (vcpu->arch.sie_block->icptcode) {  in handle_noop()
    62   trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);  in handle_stop()
    64   action_bits = vcpu->arch.local_int.action_bits;  in handle_stop()
    83   int viwhy = vcpu->arch.sie_block->ipb >> 16;  in handle_validity()
    97   vcpu->arch.sie_block->ipa,  in handle_instruction()
    98   vcpu->arch.sie_block->ipb);  in handle_instruction()
    99   handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];  in handle_instruction()
    109  pgm_info->code = vcpu->arch.sie_block->iprcc;  in __extract_prog_irq()
    111  switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {  in __extract_prog_irq()
    122  pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;  in __extract_prog_irq()
    [all …]
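
handle_instruction() shows the intercept dispatch pattern: the intercepted instruction's opcode halfword (ipa) sits in the SIE block, and its major byte indexes a table of handlers. A stripped-down sketch:

    #include <errno.h>

    struct vcpu;                                      /* opaque for the sketch */
    typedef int (*intercept_handler_t)(struct vcpu *vcpu);

    static intercept_handler_t instruction_handlers[256];

    static int handle_instruction(struct vcpu *vcpu, unsigned short ipa)
    {
            intercept_handler_t handler = instruction_handlers[ipa >> 8];

            if (handler)
                    return handler(vcpu);             /* e.g. a privileged-op emulator */
            return -EOPNOTSUPP;                       /* no handler for this opcode byte */
    }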

kvm-s390.h
    30   #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
    32   #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
    36   debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
    42   debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
    44   d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
    50   return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;  in is_vcpu_stopped()
    56   if (kvm->arch.gmap)  in kvm_is_ucontrol()
    67   return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;  in kvm_s390_get_prefix()
    72   vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;  in kvm_s390_set_prefix()
    79   u32 base2 = vcpu->arch.sie_block->ipb >> 28;  in kvm_s390_get_base_disp_s()
    [all …]

/arch/cris/

Makefile
    15  arch-y := v10
    16  arch-$(CONFIG_ETRAX_ARCH_V10) := v10
    17  arch-$(CONFIG_ETRAX_ARCH_V32) := v32
    24  ifneq ($(arch-y),)
    25  SARCH := arch-$(arch-y)
    28  inc += -Iarch/cris/include/uapi/$(SARCH)/arch
    29  inc += -Iarch/cris/include/$(SARCH)/arch
    47  KBUILD_AFLAGS += -mlinux -march=$(arch-y) $(inc)
    48  KBUILD_CFLAGS += -mlinux -march=$(arch-y) -pipe $(inc)
    56  head-y := arch/cris/$(SARCH)/kernel/head.o
    [all …]

/arch/ia64/

Makefile
    25  KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/ia64/module.lds
    33  GAS_STATUS = $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)")
    34  KBUILD_CPPFLAGS += $(shell $(srctree)/arch/ia64/scripts/toolchain-flags "$(CC)" "$(OBJDUMP)" "$(REA…
    45  head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
    47  libs-y += arch/ia64/lib/
    48  core-y += arch/ia64/kernel/ arch/ia64/mm/
    49  core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
    50  core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
    51  core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
    52  core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
    [all …]

/arch/x86/tools/

Makefile
    15  distill_awk = $(srctree)/arch/x86/tools/distill.awk
    16  chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
    31  …en.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86…
    33  …STCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srct…
    36  …arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(…
    38  …arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(…

/arch/powerpc/include/asm/

kvm_booke.h
    39  vcpu->arch.gpr[num] = val;  in kvmppc_set_gpr()
    44  return vcpu->arch.gpr[num];  in kvmppc_get_gpr()
    49  vcpu->arch.cr = val;  in kvmppc_set_cr()
    54  return vcpu->arch.cr;  in kvmppc_get_cr()
    59  vcpu->arch.xer = val;  in kvmppc_set_xer()
    64  return vcpu->arch.xer;  in kvmppc_get_xer()
    75  vcpu->arch.ctr = val;  in kvmppc_set_ctr()
    80  return vcpu->arch.ctr;  in kvmppc_get_ctr()
    85  vcpu->arch.lr = val;  in kvmppc_set_lr()
    90  return vcpu->arch.lr;  in kvmppc_get_lr()
    [all …]

/arch/x86/kvm/

x86.h
    9   vcpu->arch.exception.pending = false;  in kvm_clear_exception_queue()
    15  vcpu->arch.interrupt.pending = true;  in kvm_queue_interrupt()
    16  vcpu->arch.interrupt.soft = soft;  in kvm_queue_interrupt()
    17  vcpu->arch.interrupt.nr = vector;  in kvm_queue_interrupt()
    22  vcpu->arch.interrupt.pending = false;  in kvm_clear_interrupt_queue()
    27  return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||  in kvm_event_needs_reinjection()
    28  vcpu->arch.nmi_injected;  in kvm_event_needs_reinjection()
    44  return vcpu->arch.efer & EFER_LMA;  in is_long_mode()
    62  return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;  in mmu_is_nested()
    88  vcpu->arch.mmio_gva = gva & PAGE_MASK;  in vcpu_cache_mmio_info()
    [all …]
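
x86.h keeps simple pending flags for exceptions, interrupts, and NMIs; kvm_event_needs_reinjection() just ORs them to decide whether a previously queued event must be injected again before anything new. A reduced sketch of the queue operations:

    #include <stdbool.h>

    struct pending_events {
            bool exception_pending;
            bool interrupt_pending;
            bool interrupt_soft;     /* soft interrupts come from INT n, not the PIC/APIC */
            unsigned char interrupt_nr;
            bool nmi_injected;
    };

    static void queue_interrupt(struct pending_events *e, unsigned char vector, bool soft)
    {
            e->interrupt_pending = true;
            e->interrupt_soft = soft;
            e->interrupt_nr = vector;
    }

    static bool event_needs_reinjection(const struct pending_events *e)
    {
            return e->exception_pending || e->interrupt_pending || e->nmi_injected;
    }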

kvm_cache_regs.h
    12  if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))  in kvm_register_read()
    15  return vcpu->arch.regs[reg];  in kvm_register_read()
    22  vcpu->arch.regs[reg] = val;  in kvm_register_write()
    23  __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);  in kvm_register_write()
    24  __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);  in kvm_register_write()
    42  (unsigned long *)&vcpu->arch.regs_avail))  in kvm_pdptr_read()
    45  return vcpu->arch.walk_mmu->pdptrs[index];  in kvm_pdptr_read()
    51  if (tmask & vcpu->arch.cr0_guest_owned_bits)  in kvm_read_cr0_bits()
    53  return vcpu->arch.cr0 & mask;  in kvm_read_cr0_bits()
    64  if (tmask & vcpu->arch.cr4_guest_owned_bits)  in kvm_read_cr4_bits()
    [all …]
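
kvm_cache_regs.h is a lazy register cache: regs_avail marks which cached values are current (a read with the bit clear must first pull the register from the VMCS/VMCB), and regs_dirty marks values that must be written back before the next guest entry. A self-contained sketch of the same avail/dirty discipline (hw_read_reg is a hypothetical slow-path backend):

    #include <stdbool.h>

    enum { NR_REGS = 17 };

    struct reg_cache {
            unsigned long regs[NR_REGS];
            bool avail[NR_REGS];   /* regs[i] holds the current value     */
            bool dirty[NR_REGS];   /* regs[i] must be flushed to hardware */
    };

    extern unsigned long hw_read_reg(int reg);   /* hypothetical slow path */

    static unsigned long register_read(struct reg_cache *c, int reg)
    {
            if (!c->avail[reg]) {
                    c->regs[reg] = hw_read_reg(reg);
                    c->avail[reg] = true;
            }
            return c->regs[reg];
    }

    static void register_write(struct reg_cache *c, int reg, unsigned long val)
    {
            c->regs[reg] = val;
            c->avail[reg] = true;   /* cached copy is now authoritative...    */
            c->dirty[reg] = true;   /* ...and hardware is stale until flushed */
    }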

x86.c
    72   container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
    175  vcpu->arch.apf.gfns[i] = ~0;  in kvm_async_pf_hash_reset()
    265  return vcpu->arch.apic_base;  in kvm_get_apic_base()
    271  u64 old_state = vcpu->arch.apic_base &  in kvm_set_apic_base()
    354  if (!vcpu->arch.exception.pending) {  in kvm_multiple_exception()
    356  vcpu->arch.exception.pending = true;  in kvm_multiple_exception()
    357  vcpu->arch.exception.has_error_code = has_error;  in kvm_multiple_exception()
    358  vcpu->arch.exception.nr = nr;  in kvm_multiple_exception()
    359  vcpu->arch.exception.error_code = error_code;  in kvm_multiple_exception()
    360  vcpu->arch.exception.reinject = reinject;  in kvm_multiple_exception()
    [all …]

/arch/x86/kernel/

machine_kexec_32.c
    72  free_page((unsigned long)image->arch.pgd);  in machine_kexec_free_page_tables()
    74  free_page((unsigned long)image->arch.pmd0);  in machine_kexec_free_page_tables()
    75  free_page((unsigned long)image->arch.pmd1);  in machine_kexec_free_page_tables()
    77  free_page((unsigned long)image->arch.pte0);  in machine_kexec_free_page_tables()
    78  free_page((unsigned long)image->arch.pte1);  in machine_kexec_free_page_tables()
    83  image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);  in machine_kexec_alloc_page_tables()
    85  image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL);  in machine_kexec_alloc_page_tables()
    86  image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL);  in machine_kexec_alloc_page_tables()
    88  image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL);  in machine_kexec_alloc_page_tables()
    89  image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL);  in machine_kexec_alloc_page_tables()
    [all …]
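
The machine_kexec_32.c hits form a strict alloc/free pair: every page-table level gets its own zeroed page, and the free routine releases exactly the same set, so partial failure can be unwound by the same free path. A userspace-flavored sketch of that shape, with calloc/free standing in for get_zeroed_page/free_page (not the kernel's exact error handling):

    #include <stdlib.h>

    struct kexec_tables { void *pgd, *pmd0, *pmd1, *pte0, *pte1; };

    static void free_page_tables(struct kexec_tables *t)
    {
            free(t->pgd); free(t->pmd0); free(t->pmd1);
            free(t->pte0); free(t->pte1);   /* free(NULL) is a no-op */
    }

    static int alloc_page_tables(struct kexec_tables *t)
    {
            t->pgd  = calloc(1, 4096);
            t->pmd0 = calloc(1, 4096);
            t->pmd1 = calloc(1, 4096);
            t->pte0 = calloc(1, 4096);
            t->pte1 = calloc(1, 4096);
            if (!t->pgd || !t->pmd0 || !t->pmd1 || !t->pte0 || !t->pte1) {
                    free_page_tables(t);    /* unwind a partial allocation */
                    return -1;
            }
            return 0;
    }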

/arch/powerpc/kernel/

asm-offsets.c
    436  DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));  in main()
    437  DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));  in main()
    438  DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));  in main()
    439  DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));  in main()
    440  DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));  in main()
    441  DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));  in main()
    443  DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));  in main()
    445  DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));  in main()
    446  DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));  in main()
    447  DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));  in main()
    [all …]
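
asm-offsets.c exists so assembly code can use C structure layouts: each DEFINE() emits the offset into the compiler's assembly output, tagged so the build can extract it into a generated asm-offsets.h. The mechanism, essentially as in include/linux/kbuild.h:

    /* Emit the constant as a "->" marker line in the .s output; kbuild
     * greps these out and turns each into a #define in asm-offsets.h. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* Usage, as in main() above:
     *     DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
     * Assembly can then reference the field without hardcoded offsets:
     *     lwz r5, VCPU_XER(r4)                                        */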

/arch/avr32/

Makefile
    27  machdirs := $(patsubst %,arch/avr32/mach-%/, $(machine-y))
    31  head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
    32  head-y += arch/avr32/kernel/head.o
    34  core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
    35  core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/
    36  core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/
    37  core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/
    38  core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/
    39  core-$(CONFIG_BOARD_MIMC200) += arch/avr32/boards/mimc200/
    40  core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/
    [all …]

/arch/ia64/kernel/

module.c
    310  if (mod && mod->arch.init_unw_table &&  in module_free()
    312  unw_remove_unwind_table(mod->arch.init_unw_table);  in module_free()
    313  mod->arch.init_unw_table = NULL;  in module_free()
    435  mod->arch.core_plt = s;  in module_frob_arch_sections()
    437  mod->arch.init_plt = s;  in module_frob_arch_sections()
    439  mod->arch.got = s;  in module_frob_arch_sections()
    441  mod->arch.opd = s;  in module_frob_arch_sections()
    443  mod->arch.unwind = s;  in module_frob_arch_sections()
    447  mod->arch.paravirt_bundles = s;  in module_frob_arch_sections()
    450  mod->arch.paravirt_insts = s;  in module_frob_arch_sections()
    [all …]
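
module_frob_arch_sections() walks the module's ELF section headers once and remembers pointers to the arch-specific ones (.got, .opd, the PLTs, unwind info) in mod->arch, so later relocation passes can find them without rescanning. The scanning pattern, sketched over generic ELF types (section names here are illustrative):

    #include <elf.h>
    #include <string.h>

    /* Hypothetical holder for the sections an arch cares about. */
    struct arch_sections {
            Elf64_Shdr *got, *opd, *unwind;
    };

    static void remember_arch_sections(Elf64_Shdr *sechdrs, unsigned int shnum,
                                       const char *secstrings,
                                       struct arch_sections *out)
    {
            for (unsigned int i = 0; i < shnum; i++) {
                    const char *name = secstrings + sechdrs[i].sh_name;

                    if (strcmp(name, ".got") == 0)
                            out->got = &sechdrs[i];
                    else if (strcmp(name, ".opd") == 0)
                            out->opd = &sechdrs[i];
                    else if (strcmp(name, ".IA_64.unwind") == 0)  /* illustrative */
                            out->unwind = &sechdrs[i];
            }
    }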