#------------------------------------------------------------------------------
#
# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
#   SmiEntry.S
#
# Abstract:
#
#   Code template of the SMI handler for a particular processor
#
#------------------------------------------------------------------------------

ASM_GLOBAL  ASM_PFX(gcStmSmiHandlerTemplate)
ASM_GLOBAL  ASM_PFX(gcStmSmiHandlerSize)
ASM_GLOBAL  ASM_PFX(gcStmSmiHandlerOffset)
ASM_GLOBAL  ASM_PFX(gStmSmiCr3)
ASM_GLOBAL  ASM_PFX(gStmSmiStack)
ASM_GLOBAL  ASM_PFX(gStmSmbase)
ASM_GLOBAL  ASM_PFX(gStmXdSupported)
ASM_GLOBAL  ASM_PFX(gStmSmiHandlerIdtr)
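#
# These symbols are patched and consumed by the C code of this library when
# it installs the per-processor SMI handler.
#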

.equ            MSR_IA32_MISC_ENABLE, 0x1A0
.equ            MSR_EFER, 0xc0000080
.equ            MSR_EFER_XD, 0x800

#
# Constants relating to TXT_PROCESSOR_SMM_DESCRIPTOR
#
.equ            DSC_OFFSET, 0xfb00
.equ            DSC_GDTPTR, 0x48
.equ            DSC_GDTSIZ, 0x50
.equ            DSC_CS, 0x14
.equ            DSC_DS, 0x16
.equ            DSC_SS, 0x18
.equ            DSC_OTHERSEG, 0x1a
#
# Constants relating to CPU State Save Area
#
.equ            SSM_DR6,   0xffd0
.equ            SSM_DR7,   0xffc8

.equ            PROTECT_MODE_CS, 0x08
.equ            PROTECT_MODE_DS, 0x20
.equ            LONG_MODE_CS, 0x38
.equ            TSS_SEGMENT, 0x40
.equ            GDT_SIZE, 0x50

    .text

ASM_PFX(gcStmSmiHandlerTemplate):

_StmSmiEntryPoint:
    #
    # The encoding of BX in 16-bit addressing mode is the same as that of RDI
    # in 64-bit addressing mode. That coincidence is used in the following
    # "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
    # base address register, it is actually BX that is referenced.
    #
    .byte 0xbb                          # mov bx, imm16
    .word _StmGdtDesc - _StmSmiEntryPoint + 0x8000
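    #
    # bx now holds the offset of _StmGdtDesc within this SMRAM image (the SMI
    # entry point executes in real mode at SMBASE + 0x8000).
    #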
    #
    # Fix the GDT descriptor: store the GDT limit (size - 1) and base taken
    # from the TXT processor SMM descriptor at DSC_OFFSET into _StmGdtDesc.
    #
    .byte 0x2e,0xa1                     # mov ax, cs:[offset16]
    .word      DSC_OFFSET + DSC_GDTSIZ
    .byte 0x48                          # dec ax
    .byte 0x2e
    movl    %eax, (%rdi)                # mov cs:[bx], ax
    .byte 0x66,0x2e,0xa1                # mov eax, cs:[offset16]
    .word      DSC_OFFSET + DSC_GDTPTR
    .byte 0x2e
    movw    %ax, 2(%rdi)
    .byte 0x66,0x2e
    lgdt    (%rdi)
    #
    # Patch ProtectedMode Segment
    #
    .byte 0xb8
    .word PROTECT_MODE_CS
    .byte 0x2e
    movl    %eax, -2(%rdi)
    #
    # Patch ProtectedMode entry
    #
    .byte 0x66, 0xbf                    # mov edi, SMBASE
ASM_PFX(gStmSmbase): .space 4
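    #
    # edi = SMBASE (gStmSmbase is filled in by the C code with this
    # processor's SMBASE).
    #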
    lea     ((ProtectedMode - _StmSmiEntryPoint) + 0x8000)(%edi), %ax
    .byte 0x2e
    movw    %ax, -6(%rdi)
    #
    # Switch into ProtectedMode
    #
    movq    %cr0, %rbx
    .byte 0x66
    andl    $0x9ffafff3, %ebx
    .byte 0x66
    orl     $0x00000023, %ebx

    movq    %rbx, %cr0
    .byte 0x66, 0xea                    # far jmp ptr16:32, operand patched above
    .space 6                            # 4-byte offset (ProtectedMode) + 2-byte selector (PROTECT_MODE_CS)

_StmGdtDesc:    .space  6

ProtectedMode:
    movw    $PROTECT_MODE_DS, %ax
    movl    %eax, %ds
    movl    %eax, %es
    movl    %eax, %fs
    movl    %eax, %gs
    movl    %eax, %ss
    .byte   0xbc                       # mov esp, imm32
ASM_PFX(gStmSmiStack):   .space  4
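    #
    # gStmSmiStack is filled in by the C code with this processor's SMI stack.
    #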
    jmp     ProtFlatMode

ProtFlatMode:
    .byte   0xb8
ASM_PFX(gStmSmiCr3):    .space  4
    movq    %rax, %cr3
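    # 0x668 = CR4.DE | PAE | MCE | OSFXSR | OSXMMEXCPT; PAE is required for
    # the switch to long mode below.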
    movl    $0x668,%eax                 # as cr4.PGE is not set here, refresh cr3
    movq    %rax, %cr4                  # in PreModifyMtrrs() to flush TLB.
# Load TSS
    subl    $8, %esp                    # reserve room in stack
    sgdt    (%rsp)
    movl    2(%rsp), %eax               # eax = GDT base
    addl    $8, %esp
    movb    $0x89, %dl                  # 0x89 = present, DPL0, available TSS
    movb    %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag so ltr below does not fault
    movl    $TSS_SEGMENT, %eax
    ltr     %ax

# enable NXE if supported
    .byte   0xb0                        # mov al, imm8
ASM_PFX(gStmXdSupported): .byte 1
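    # al = gStmXdSupported: non-zero when the processor supports execute
    # disable; maintained by the C code.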
    cmpb    $0, %al
    jz      SkipXd
#
# Check XD disable bit
#
    movl    $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    subl    $4, %esp
    pushq   %rdx                       # save MSR_IA32_MISC_ENABLE[63-32]
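    # The 4 bytes reserved above plus this 4-byte push (still 32-bit mode)
    # total 8 bytes, matching the SkipXd path below so the stack layout at
    # XdDone is identical either way.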
    testl   $BIT2, %edx                # MSR_IA32_MISC_ENABLE[34]
    jz      L13
    andw    $0x0FFFB, %dx              # clear XD Disable bit if it is set
    wrmsr
L13:
    movl    $MSR_EFER, %ecx
    rdmsr
    orw     $MSR_EFER_XD,%ax            # enable NXE
    wrmsr
    jmp     XdDone
SkipXd:
    subl    $8, %esp
XdDone:

    #
    # Switch to LongMode
    #
    pushq    $LONG_MODE_CS               # push the long-mode CS selector (hardcoded)
    call     Base                        # push return address for retf later
Base:
    addl    $(LongMode - Base), (%rsp)  # point the return address at LongMode for the far retf below

    movl    $MSR_EFER, %ecx
    rdmsr
    orb     $1,%ah                      # enable LME
    wrmsr
    movq    %cr0, %rbx
    orl     $0x80010023, %ebx           # enable paging + WP + NE + MP + PE
    movq    %rbx, %cr0
    retf                                # far return into LONG_MODE_CS:LongMode
LongMode:                               # long mode (64-bit code) starts here
    movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
    lidt    (%rax)
    lea     (DSC_OFFSET)(%rdi), %ebx
    movw    DSC_DS(%rbx), %ax
    movl    %eax,%ds
    movw    DSC_OTHERSEG(%rbx), %ax
    movl    %eax,%es
    movl    %eax,%fs
    movl    %eax,%gs
    movw    DSC_SS(%rbx), %ax
    movl    %eax,%ss

CommonHandler:
    movq    8(%rsp), %rbx               # rbx = CpuIndex, passed to the C handlers below
    # Save FP registers

    subq    $0x200, %rsp
    .byte   0x48                        # REX.W prefix: makes the fxsave below FXSAVE64
    fxsave  (%rsp)

    addq    $-0x20, %rsp                # shadow space for the Microsoft x64 calling convention

    movq    %rbx, %rcx
    movabsq $ASM_PFX(CpuSmmDebugEntry), %rax
    call    *%rax

    movq    %rbx, %rcx
    movabsq $ASM_PFX(SmiRendezvous), %rax
    call    *%rax

    movq    %rbx, %rcx
    movabsq $ASM_PFX(CpuSmmDebugExit), %rax
    call    *%rax

    addq    $0x20, %rsp                 # discard shadow space

    #
    # Restore FP registers
    #
    .byte   0x48                        # REX.W prefix: makes the fxrstor below FXRSTOR64
    fxrstor (%rsp)

    addq    $0x200, %rsp

    movabsq $ASM_PFX(gStmXdSupported), %rax
    movb    (%rax), %al
    cmpb    $0, %al
    jz      L16
    popq    %rdx                        # get saved MSR_IA32_MISC_ENABLE[63-32]
    testl   $BIT2, %edx
    jz      L16
    movl    $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    orw     $BIT2, %dx                  # restore the XD Disable bit if it was set before entering SMM
    wrmsr

L16:
    rsm

_StmSmiHandler:
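#
# Alternate entry point used when an STM is active: the STM enters here in
# long mode, skipping the real-mode/protected-mode setup above, so the IDT,
# CR0 and CR4 are programmed below before jumping to CommonHandler.
#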
#
# Check XD disable bit
#
    xorq    %r8, %r8
    movabsq $ASM_PFX(gStmXdSupported), %rax
    movb    (%rax), %al
    cmpb    $0, %al
    jz      StmXdDone
    movl    $MSR_IA32_MISC_ENABLE, %ecx
    rdmsr
    movq    %rdx, %r8                  # save MSR_IA32_MISC_ENABLE[63-32]
    testl   $BIT2, %edx                # MSR_IA32_MISC_ENABLE[34]
    jz      L14
    andw    $0x0FFFB, %dx              # clear XD Disable bit if it is set
    wrmsr
L14:
    movl    $MSR_EFER, %ecx
    rdmsr
    orw     $MSR_EFER_XD,%ax            # enable NXE
    wrmsr
StmXdDone:
    pushq   %r8                         # saved MSR_IA32_MISC_ENABLE[63-32]; popped in CommonHandler when XD is supported

    # These steps are needed because the STM does not execute the setup code
    # above; the IDT, CR0 and CR4 must be programmed here.
    movabsq $ASM_PFX(gStmSmiHandlerIdtr), %rax
    lidt    (%rax)

    movq    %cr0, %rax
    orl     $0x80010023, %eax
    movq    %rax, %cr0
    movq    %cr4, %rax
    movl    $0x668, %eax                # as cr4.PGE is not set here, refresh cr3
    movq    %rax, %cr4                  # in PreModifyMtrrs() to flush TLB.
    # STM init finish
    jmp     CommonHandler

ASM_PFX(gcStmSmiHandlerSize)  :  .word      . - _StmSmiEntryPoint
ASM_PFX(gcStmSmiHandlerOffset):  .word      _StmSmiHandler - _StmSmiEntryPoint