/*
 * Suspend support specific for i386/x86-64.
 */
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

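/*
 * Save the MSRs listed in the platform quirk table (see msr_init_context()
 * below).  Entries whose rdmsrl_safe() read fails are flagged invalid so
 * msr_restore_context() skips them on resume.
 */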
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

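/*
 * Write back the MSR values captured by msr_save_context(), skipping any
 * entries that could not be read at save time.
 */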
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (ie. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (ie. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, the
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64".  In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

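/*
 * Reload the per-CPU descriptor and FPU state (TSS, LDT, syscall MSRs,
 * TLB state, fixmap GDT) that the plain register restore in
 * __restore_processor_state() does not cover.
 */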
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not be
				 * necessary.  But it is, because 386 hardware
				 * has the concept of a busy TSS.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* available 64-bit TSS */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * Now restore the descriptor tables to their proper values.
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	fix_processor_context();

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
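/*
 * Park a "dead" CPU in hlt instead of mwait while the hibernation image is
 * being restored; see hibernate_resume_nonboot_cpu_disable() below.
 */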
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 */
	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle race conditions
 * between the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

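/*
 * PM notifier: reject suspend/hibernation if the boot CPU (CPU0) is offline,
 * and handle CPU0 onlining/offlining around restore when CPU0 hotplug
 * debugging is enabled.
 */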
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for the resume and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares for the snapshot device during boot time.  So we
		 * just call _debug_hotplug_cpu() to restore CPU0's state prior
		 * to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online when an application accesses the snapshot
		 * device after boot time, this will offline CPU0, so the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device.  To keep this debug code simple, only
		 * the normal boot case is considered.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback is called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

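/*
 * Register the list of MSRs to save/restore across suspend.  Called at most
 * once, from a DMI quirk callback such as msr_initialize_bdw() below.
 */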
static int msr_init_context(const u32 *msr_id, const int total_num)
{
	int i = 0;
	struct saved_msr *msr_array;

	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
		return -EINVAL;
	}

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	for (i = 0; i < total_num; i++) {
		msr_array[i].info.msr_no = msr_id[i];
		msr_array[i].valid = false;
		msr_array[i].info.reg.q = 0;
	}
	saved_context.saved_msrs.num = total_num;
	saved_context.saved_msrs.array = msr_array;

	return 0;
}

/*
 * The following is a quirk framework for problematic BIOSen:
 * Sometimes MSRs are modified by the BIOS after suspend to RAM,
 * which might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

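/*
 * Check the DMI quirk table at boot; a match registers the MSRs that need
 * to be saved and restored around suspend on that platform.
 */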
static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	return 0;
}

device_initcall(pm_check_save_msr);