• Home
  • Raw
  • Download

Lines Matching +full:disable +full:- +full:hibernation +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Suspend support specific for i386/x86-64.
40 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context()
41 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context()
44 if (msr->valid) in msr_save_context()
45 rdmsrl(msr->info.msr_no, msr->info.reg.q); in msr_save_context()
52 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context()
53 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context()
56 if (msr->valid) in msr_restore_context()
57 wrmsrl(msr->info.msr_no, msr->info.reg.q); in msr_restore_context()
63 * __save_processor_state - save CPU registers before creating a
64 * hibernation image and before restoring the memory state from it
65 * @ctxt: structure to store the registers' contents in
68 * boot kernel (i.e. the kernel used for loading the hibernation image)
70 * saved in the hibernation image), then its contents must be saved by this
72 * kernel B is used for loading the hibernation image into memory, the
87 store_idt(&ctxt->idt); in __save_processor_state()
91 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit in __save_processor_state()
92 * mode in "secondary_startup_64". In 32-bit mode it is done via in __save_processor_state()
95 ctxt->gdt_desc.size = GDT_SIZE - 1; in __save_processor_state()
96 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); in __save_processor_state()
98 store_tr(ctxt->tr); in __save_processor_state()
105 savesegment(gs, ctxt->gs); in __save_processor_state()
108 savesegment(gs, ctxt->gs); in __save_processor_state()
109 savesegment(fs, ctxt->fs); in __save_processor_state()
110 savesegment(ds, ctxt->ds); in __save_processor_state()
111 savesegment(es, ctxt->es); in __save_processor_state()
113 rdmsrl(MSR_FS_BASE, ctxt->fs_base); in __save_processor_state()
114 rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); in __save_processor_state()
115 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); in __save_processor_state()
118 rdmsrl(MSR_EFER, ctxt->efer); in __save_processor_state()
124 ctxt->cr0 = read_cr0(); in __save_processor_state()
125 ctxt->cr2 = read_cr2(); in __save_processor_state()
126 ctxt->cr3 = __read_cr3(); in __save_processor_state()
127 ctxt->cr4 = __read_cr4(); in __save_processor_state()
128 ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, in __save_processor_state()
129 &ctxt->misc_enable); in __save_processor_state()
166 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); in fix_processor_context()
170 tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */ in fix_processor_context()
179 load_mm_ldt(current->active_mm); /* This does lldt */ in fix_processor_context()
189 * __restore_processor_state - restore the contents of CPU registers saved
191 * @ctxt: structure to load the registers' contents from
200 if (ctxt->misc_enable_saved) in __restore_processor_state()
201 wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); in __restore_processor_state()
207 if (ctxt->cr4) in __restore_processor_state()
208 __write_cr4(ctxt->cr4); in __restore_processor_state()
211 wrmsrl(MSR_EFER, ctxt->efer); in __restore_processor_state()
212 __write_cr4(ctxt->cr4); in __restore_processor_state()
214 write_cr3(ctxt->cr3); in __restore_processor_state()
215 write_cr2(ctxt->cr2); in __restore_processor_state()
216 write_cr0(ctxt->cr0); in __restore_processor_state()
219 load_idt(&ctxt->idt); in __restore_processor_state()
234 wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); in __restore_processor_state()
240 /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ in __restore_processor_state()
248 loadsegment(ds, ctxt->es); in __restore_processor_state()
249 loadsegment(es, ctxt->es); in __restore_processor_state()
250 loadsegment(fs, ctxt->fs); in __restore_processor_state()
251 load_gs_index(ctxt->gs); in __restore_processor_state()
258 wrmsrl(MSR_FS_BASE, ctxt->fs_base); in __restore_processor_state()
259 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); in __restore_processor_state()
261 loadsegment(gs, ctxt->gs); in __restore_processor_state()
317 * Those will be put to proper (not interfering with hibernation in hibernate_resume_nonboot_cpu_disable()
340 return -ENODEV; in bsp_check()
359 * When system resumes from hibernation, online CPU0 because in bsp_pm_callback()
361 * 2. the CPU was online before hibernation in bsp_pm_callback()
370 * This code is called only when user space hibernation software in bsp_pm_callback()
376 * mode, i.e. CPU0 is offline and user mode hibernation in bsp_pm_callback()
404 * earlier to disable cpu hotplug before bsp online check. in bsp_pm_check_init()
406 pm_notifier(bsp_pm_callback, -INT_MAX); in bsp_pm_check_init()
419 total_num = saved_msrs->num + num; in msr_build_context()
424 return -ENOMEM; in msr_build_context()
427 if (saved_msrs->array) { in msr_build_context()
432 memcpy(msr_array, saved_msrs->array, in msr_build_context()
433 sizeof(struct saved_msr) * saved_msrs->num); in msr_build_context()
435 kfree(saved_msrs->array); in msr_build_context()
438 for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { in msr_build_context()
445 saved_msrs->num = total_num; in msr_build_context()
446 saved_msrs->array = msr_array; in msr_build_context()
466 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); in msr_initialize_bdw()
476 DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
489 c->family); in msr_save_cpuid_features()
510 fn = (pm_cpu_match_t)m->driver_data; in pm_cpu_check()