/*
* Hibernation support for x86-64
*
* Distribute under GPLv2.
*
* Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright 2005 Andi Kleen <ak@suse.de>
* Copyright 2004 Pavel Machek <pavel@suse.cz>
*
* swsusp_arch_resume must not use any stack or any nonlocal variables while
* copying pages:
*
* It's rewriting one kernel image with another.  What is a stack page in the
* "old" image could very well be a data page in the "new" image, and
* overwriting your own stack out from under you is a bad idea.
*/
.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
ENTRY(swsusp_arch_suspend)
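/* save the general-purpose registers into saved_context */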
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
movq %rsi, pt_regs_si(%rax)
movq %rdi, pt_regs_di(%rax)
movq %rbx, pt_regs_bx(%rax)
movq %rcx, pt_regs_cx(%rax)
movq %rdx, pt_regs_dx(%rax)
movq %r8, pt_regs_r8(%rax)
movq %r9, pt_regs_r9(%rax)
movq %r10, pt_regs_r10(%rax)
movq %r11, pt_regs_r11(%rax)
movq %r12, pt_regs_r12(%rax)
movq %r13, pt_regs_r13(%rax)
movq %r14, pt_regs_r14(%rax)
movq %r15, pt_regs_r15(%rax)
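/* save the flags register */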
pushfq
popq pt_regs_flags(%rax)
/* save the address of restore_registers */
movq $restore_registers, %rax
movq %rax, restore_jump_address(%rip)
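/* restore_jump_address is passed to the boot kernel via the image header */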
/* save cr3 */
movq %cr3, %rax
movq %rax, restore_cr3(%rip)
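/* swsusp_save() creates the hibernation image; its return value goes back to the caller in %rax */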
call swsusp_save
ret
ENTRY(restore_image)
/* switch to temporary page tables */
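/* %cr3 needs a physical address, so subtract __PAGE_OFFSET from the virtual address in temp_level4_pgt */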
movq $__PAGE_OFFSET, %rdx
movq temp_level4_pgt(%rip), %rax
subq %rdx, %rax
movq %rax, %cr3
/* Flush TLB */
movq mmu_cr4_features(%rip), %rax
movq %rax, %rdx
andq $~(X86_CR4_PGE), %rdx
movq %rdx, %cr4; # turn off PGE
movq %cr3, %rcx; # flush TLB
movq %rcx, %cr3;
movq %rax, %cr4; # turn PGE back on
/* prepare to jump to the image kernel */
movq restore_jump_address(%rip), %rax
movq restore_cr3(%rip), %rbx
/* prepare to copy image data to their original locations */
movq restore_pblist(%rip), %rdx
movq relocated_restore_code(%rip), %rcx
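/* %rax = restore_registers, %rbx = image kernel's %cr3, %rdx = head of the pbe list */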
jmpq *%rcx
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
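/* walk the list of pbe structures in %rdx until the next pointer is zero */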
loop:
testq %rdx, %rdx
jz done
/* get addresses from the pbe and copy the page */
movq pbe_address(%rdx), %rsi
movq pbe_orig_address(%rdx), %rdi
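/* copy one page: PAGE_SIZE / 8 quadwords with rep movsq */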
movq $(PAGE_SIZE >> 3), %rcx
rep
movsq
/* progress to the next pbe */
movq pbe_next(%rdx), %rdx
jmp loop
done:
/* jump to the restore_registers address from the image header */
jmpq *%rax
/*
* NOTE: This assumes that the boot kernel's text mapping covers the
* image kernel's page containing restore_registers and the address of
* this page is the same as in the image kernel's text mapping (it
* should always be true, because the text mapping is linear, starting
* from 0, and is supposed to cover the entire kernel text for every
* kernel).
*
* code below belongs to the image kernel
*/
ENTRY(restore_registers)
/* go back to the original page tables */
movq %rbx, %cr3
/* Flush TLB, including "global" things (vmalloc) */
movq mmu_cr4_features(%rip), %rax
movq %rax, %rdx
andq $~(X86_CR4_PGE), %rdx
movq %rdx, %cr4; # turn off PGE
movq %cr3, %rcx; # flush TLB
movq %rcx, %cr3
movq %rax, %cr4; # turn PGE back on
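/* restore the registers saved by swsusp_arch_suspend */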
/* We don't restore %rax, it must be 0 anyway */
movq $saved_context, %rax
movq pt_regs_sp(%rax), %rsp
movq pt_regs_bp(%rax), %rbp
movq pt_regs_si(%rax), %rsi
movq pt_regs_di(%rax), %rdi
movq pt_regs_bx(%rax), %rbx
movq pt_regs_cx(%rax), %rcx
movq pt_regs_dx(%rax), %rdx
movq pt_regs_r8(%rax), %r8
movq pt_regs_r9(%rax), %r9
movq pt_regs_r10(%rax), %r10
movq pt_regs_r11(%rax), %r11
movq pt_regs_r12(%rax), %r12
movq pt_regs_r13(%rax), %r13
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15
pushq pt_regs_flags(%rax)
popfq
/* Saved in save_processor_state. */
lgdt saved_context_gdt_desc(%rax)
xorq %rax, %rax
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
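/* return through the image kernel's restored stack */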
ret