/** @file
  Data types, macros and function prototypes of the heap guard feature.

Copyright (c) 2017-2018, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#ifndef _HEAPGUARD_H_
#define _HEAPGUARD_H_

//
// Following macros are used to define and access the guarded memory bitmap
// table.
//
// To simplify access and reduce the memory used for this table, the table
// is constructed in a way similar to a page table structure, but in the
// reverse direction, i.e. growing from the bottom up to the top.
//
//    - 1-bit tracks 1 page (4KB)
//    - 1-UINT64 map entry tracks 256KB memory
//    - 1K-UINT64 map table tracks 256MB memory
//    - Five levels of tables can track any memory address on a 64-bit
//      system, as shown below.
//
//       512   *   512   *   512   *   512    *    1K   *  64b *     4K
//    111111111 111111111 111111111 111111111 1111111111 111111 111111111111
//    63        54        45        36        27         17     11         0
//       9b        9b        9b        9b         10b      6b       12b
//       L0   ->   L1   ->   L2   ->   L3   ->    L4   -> bits  ->  page
//      1FF       1FF       1FF       1FF         3FF      3F       FFF
//
// The L4 table has 1K * sizeof(UINT64) = 8KB (2 pages), which can track 256MB
// of memory. Each L0-L3 table is allocated only when its memory address
// range needs to be tracked, and just 1 page is allocated each time. This
// saves the memory needed to establish the map table.
//
// For a typical system configuration with 4GB of memory, two levels of tables
// can track all of it, because two levels of map tables (L3+L4) already
// cover a 37-bit memory address range. And since a typical UEFI BIOS consumes
// less than 128MB of memory during boot, we just need
//
//          1 page (L3) + 2 pages (L4)
//
// of memory (3 pages) to track all memory allocations. In this case, there's
// no need to set up the L0-L2 tables.
//
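// A worked example (illustrative only): for the hypothetical address
// 0x12345000 the page number is 0x12345, and the fields decode as
//
//    L0 index  = (0x12345000 >> 55) & 0x1FF = 0
//    L1 index  = (0x12345000 >> 46) & 0x1FF = 0
//    L2 index  = (0x12345000 >> 37) & 0x1FF = 0
//    L3 index  = (0x12345000 >> 28) & 0x1FF = 0x1
//    L4 index  = (0x12345000 >> 18) & 0x3FF = 0x8D
//    bit index = (0x12345000 >> 12) & 0x3F  = 0x5
//
// i.e. the guard state of that page lives in bit 5 of UINT64 entry 0x8D of
// the L4 table reached through entry 1 of the L3 table (the L0-L2 indices
// are all 0 here).
//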

//
// Each entry occupies 8B/64b. 1-page can hold 512 entries, which spans 9
// bits in address. (512 = 1 << 9)
//
#define BYTE_LENGTH_SHIFT                   3             // (8 = 1 << 3)

#define GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT  \
        (EFI_PAGE_SHIFT - BYTE_LENGTH_SHIFT)

#define GUARDED_HEAP_MAP_TABLE_DEPTH        5

// Use UINT64_index + bit_index_of_UINT64 to locate the bit in the map
#define GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT    6             // (64 = 1 << 6)

#define GUARDED_HEAP_MAP_ENTRY_BITS         \
        (1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)

#define GUARDED_HEAP_MAP_ENTRY_BYTES        \
        (GUARDED_HEAP_MAP_ENTRY_BITS / 8)

// L4 table address width: 64 - 9 * 4 - 6 - 12 = 10b
#define GUARDED_HEAP_MAP_ENTRY_SHIFT              \
        (GUARDED_HEAP_MAP_ENTRY_BITS              \
         - GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 4 \
         - GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT       \
         - EFI_PAGE_SHIFT)

// L4 table address mask: ((1 << 10) - 1) = 0x3FF
#define GUARDED_HEAP_MAP_ENTRY_MASK               \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1)

// Size of each L4 table: (1 << 10) * 8 = 8KB = 2-page
#define GUARDED_HEAP_MAP_SIZE                     \
        ((1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) * GUARDED_HEAP_MAP_ENTRY_BYTES)

// Memory size tracked by one L4 table: 8KB * 8 * 4KB = 256MB
#define GUARDED_HEAP_MAP_UNIT_SIZE                \
        (GUARDED_HEAP_MAP_SIZE * 8 * EFI_PAGE_SIZE)

// L4 table entry number: 8KB / 8 = 1024
#define GUARDED_HEAP_MAP_ENTRIES_PER_UNIT         \
        (GUARDED_HEAP_MAP_SIZE / GUARDED_HEAP_MAP_ENTRY_BYTES)

// L4 table entry indexing
#define GUARDED_HEAP_MAP_ENTRY_INDEX(Address)                       \
        (RShiftU64 (Address, EFI_PAGE_SHIFT                         \
                             + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)    \
         & GUARDED_HEAP_MAP_ENTRY_MASK)

// L4 table entry bit indexing
#define GUARDED_HEAP_MAP_ENTRY_BIT_INDEX(Address)       \
        (RShiftU64 (Address, EFI_PAGE_SHIFT)            \
         & ((1 << GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT) - 1))

//
// Total bits (pages) tracked by one L4 table (65536-bit)
//
#define GUARDED_HEAP_MAP_BITS                               \
        (1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT                 \
               + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT))

//
// Bit indexing inside the whole L4 table (0 - 65535)
//
#define GUARDED_HEAP_MAP_BIT_INDEX(Address)                     \
        (RShiftU64 (Address, EFI_PAGE_SHIFT)                    \
         & ((1 << (GUARDED_HEAP_MAP_ENTRY_SHIFT                 \
                   + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT)) - 1))

//
// Memory address bit width tracked by L4 table: 10 + 6 + 12 = 28
//
#define GUARDED_HEAP_MAP_TABLE_SHIFT                                      \
        (GUARDED_HEAP_MAP_ENTRY_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT  \
         + EFI_PAGE_SHIFT)

//
// Macro used to initialize the local array variable for map table traversing
// {55, 46, 37, 28, 18}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS                                 \
  {                                                                         \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 3,  \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT * 2,  \
    GUARDED_HEAP_MAP_TABLE_SHIFT + GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT,      \
    GUARDED_HEAP_MAP_TABLE_SHIFT,                                           \
    EFI_PAGE_SHIFT + GUARDED_HEAP_MAP_ENTRY_BIT_SHIFT                       \
  }

//
// Masks used to extract address range of each level of table
// {0x1FF, 0x1FF, 0x1FF, 0x1FF, 0x3FF}
//
#define GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS                                  \
  {                                                                         \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
    (1 << GUARDED_HEAP_MAP_TABLE_ENTRY_SHIFT) - 1,                          \
    (1 << GUARDED_HEAP_MAP_ENTRY_SHIFT) - 1                                 \
  }
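
//
// Illustrative sketch (not part of this header) of how the two arrays above
// are meant to be used together: walk the map table level by level until the
// UINT64 entry covering a given address is reached.  The root pointer name
// (mGuardedMemoryMap) and the helper name are assumptions for this example;
// the real traversal also has to deal with tables that are only built on
// demand, as described at the top of this file.
//
//    UINT64 *
//    ExampleFindL4Entry (
//      IN EFI_PHYSICAL_ADDRESS  Address
//      )
//    {
//      UINTN   Shifts[GUARDED_HEAP_MAP_TABLE_DEPTH] = GUARDED_HEAP_MAP_TABLE_DEPTH_SHIFTS;
//      UINTN   Masks[GUARDED_HEAP_MAP_TABLE_DEPTH]  = GUARDED_HEAP_MAP_TABLE_DEPTH_MASKS;
//      UINT64  *Table;
//      UINTN   Level;
//      UINTN   Index;
//
//      Table = (UINT64 *)(UINTN)mGuardedMemoryMap;
//      for (Level = 0; Level < GUARDED_HEAP_MAP_TABLE_DEPTH - 1; Level++) {
//        if (Table == NULL) {
//          return NULL;                          // This range is not tracked yet
//        }
//        Index = (UINTN)RShiftU64 (Address, Shifts[Level]) & Masks[Level];
//        Table = (UINT64 *)(UINTN)Table[Index];
//      }
//      if (Table == NULL) {
//        return NULL;
//      }
//      return &Table[GUARDED_HEAP_MAP_ENTRY_INDEX (Address)];
//    }
//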

//
// Memory type to guard (matching the related PCD definition)
//
#define GUARD_HEAP_TYPE_PAGE        BIT0
#define GUARD_HEAP_TYPE_POOL        BIT1
#define GUARD_HEAP_TYPE_FREED       BIT4
#define GUARD_HEAP_TYPE_ALL         \
        (GUARD_HEAP_TYPE_PAGE|GUARD_HEAP_TYPE_POOL|GUARD_HEAP_TYPE_FREED)

//
// Debug message level
//
#define HEAP_GUARD_DEBUG_LEVEL  (DEBUG_POOL|DEBUG_PAGE)

typedef struct {
  UINT32                TailMark;
  UINT32                HeadMark;
  EFI_PHYSICAL_ADDRESS  Address;
  LIST_ENTRY            Link;
} HEAP_GUARD_NODE;

/**
  Internal function.  Converts a memory range to the specified type.
  The range must exist in the memory map.

  @param  Start                  The first address of the range.  Must be page
                                 aligned.
  @param  NumberOfPages          The number of pages to convert.
  @param  NewType                The new type for the memory range.

  @retval EFI_INVALID_PARAMETER  Invalid parameter.
  @retval EFI_NOT_FOUND          Could not find a descriptor that covers the
                                 specified range, or the conversion is not
                                 allowed.
  @retval EFI_SUCCESS            Successfully converted the memory range to the
                                 specified type.

**/
EFI_STATUS
CoreConvertPages (
  IN UINT64           Start,
  IN UINT64           NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Allocate or free guarded memory.

  @param[in]  Start           Start address of memory to allocate or free.
  @param[in]  NumberOfPages   Memory size in pages.
  @param[in]  NewType         Memory type to convert to.

  @return The status of converting the memory range.
**/
EFI_STATUS
CoreConvertPagesWithGuard (
  IN UINT64           Start,
  IN UINTN            NumberOfPages,
  IN EFI_MEMORY_TYPE  NewType
  );

/**
  Set head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to set guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
SetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  );

/**
  Unset head Guard and tail Guard for the given memory range.

  @param[in]  Memory          Base address of memory to unset guard for.
  @param[in]  NumberOfPages   Memory size in pages.

  @return VOID.
**/
VOID
UnsetGuardForMemory (
  IN EFI_PHYSICAL_ADDRESS   Memory,
  IN UINTN                  NumberOfPages
  );

/**
  Adjust the base address and number of pages to actually allocate, according to Guard.

  @param[in,out]  Memory          Base address of free memory.
  @param[in,out]  NumberOfPages   Size of memory to allocate.

  @return VOID.
**/
VOID
AdjustMemoryA (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  );

/**
  Adjust the start address and number of pages to free according to Guard.

  The purpose of this function is to keep a Guard page shared with an adjacent
  memory block if that block is still guarded, or to free it if it is no longer
  shared.  Another purpose is to reserve pages as Guard pages when only part of
  a page range is freed.

  @param[in,out]  Memory          Base address of memory to free.
  @param[in,out]  NumberOfPages   Size of memory to free.

  @return VOID.
**/
VOID
AdjustMemoryF (
  IN OUT EFI_PHYSICAL_ADDRESS    *Memory,
  IN OUT UINTN                   *NumberOfPages
  );

/**
  Adjust address of free memory according to existing and/or required Guard.

  This function checks whether the adjacent memory blocks have existing Guard
  pages, and tries to reuse them as the Guard pages of the memory to be
  allocated.

  @param[in]  Start           Start address of free memory block.
  @param[in]  Size            Size of free memory block.
  @param[in]  SizeRequested   Size of memory to allocate.

  @return The end address of the memory block found.
  @return 0 if there is not enough space for the requested memory and its Guard
          pages.
**/
UINT64
AdjustMemoryS (
  IN UINT64                  Start,
  IN UINT64                  Size,
  IN UINT64                  SizeRequested
  );
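
//
// Simplified illustration (not the actual DXE core code path) of how the
// Adjust* helpers above compose on the allocation side: AdjustMemoryS () picks
// an end address inside a free block that leaves room for Guard pages,
// AdjustMemoryA () widens the range that really gets converted, and
// SetGuardForMemory () then sets the head/tail Guard for the user range.
// FindFreeBlock () is a hypothetical stand-in for the real free-memory search.
//
//    EFI_PHYSICAL_ADDRESS  Start;
//    UINT64                BlockSize;
//    UINT64                End;
//    EFI_PHYSICAL_ADDRESS  UserBase;
//    EFI_PHYSICAL_ADDRESS  Memory;
//    UINTN                 UserPages;
//    UINTN                 Pages;
//
//    UserPages = 4;
//    FindFreeBlock (&Start, &BlockSize);                 // hypothetical helper
//    End = AdjustMemoryS (Start, BlockSize, EFI_PAGES_TO_SIZE (UserPages));
//    if (End != 0) {
//      UserBase = End - EFI_PAGES_TO_SIZE (UserPages);   // range handed to the caller
//      Memory   = UserBase;
//      Pages    = UserPages;
//      AdjustMemoryA (&Memory, &Pages);                  // widen to cover Guard pages
//      // ... convert [Memory, Memory + Pages pages) to the allocated type ...
//      SetGuardForMemory (UserBase, UserPages);          // set head/tail Guard
//    }
//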

/**
  Check to see if the pool at the given address should be guarded or not.

  @param[in]  MemoryType      Pool type to check.

  @return TRUE  The given type of pool should be guarded.
  @return FALSE The given type of pool should not be guarded.
**/
BOOLEAN
IsPoolTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType
  );

/**
  Check to see if the page at the given address should be guarded or not.

  @param[in]  MemoryType      Page type to check.
  @param[in]  AllocateType    Allocation type to check.

  @return TRUE  The given type of page should be guarded.
  @return FALSE The given type of page should not be guarded.
**/
BOOLEAN
IsPageTypeToGuard (
  IN EFI_MEMORY_TYPE        MemoryType,
  IN EFI_ALLOCATE_TYPE      AllocateType
  );

/**
  Check to see if the page at the given address is guarded or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is guarded.
  @return FALSE The page at Address is not guarded.
**/
BOOLEAN
EFIAPI
IsMemoryGuarded (
  IN EFI_PHYSICAL_ADDRESS    Address
  );

/**
  Check to see if the page at the given address is a Guard page or not.

  @param[in]  Address     The address to check for.

  @return TRUE  The page at Address is a Guard page.
  @return FALSE The page at Address is not a Guard page.
**/
BOOLEAN
EFIAPI
IsGuardPage (
  IN EFI_PHYSICAL_ADDRESS    Address
  );

/**
  Dump the guarded memory bit map.
**/
VOID
EFIAPI
DumpGuardedMemoryBitmap (
  VOID
  );

/**
  Adjust the pool head position to make sure the Guard page is adjacent to
  the pool tail or pool head.

  @param[in]  Memory    Base address of memory allocated.
  @param[in]  NoPages   Number of pages actually allocated.
  @param[in]  Size      Size of memory requested.
                        (plus pool head/tail overhead)

  @return Address of pool head.
**/
VOID *
AdjustPoolHeadA (
  IN EFI_PHYSICAL_ADDRESS    Memory,
  IN UINTN                   NoPages,
  IN UINTN                   Size
  );

/**
  Get the page base address according to pool head address.

  @param[in]  Memory    Head address of pool to free.

  @return The page base address of the pool memory to free.
**/
VOID *
AdjustPoolHeadF (
  IN EFI_PHYSICAL_ADDRESS    Memory
  );
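
//
// Simplified illustration (not the actual pool code) of how the two helpers
// above pair up.  After NoPages pages have been allocated for a pool chunk of
// Size bytes, AdjustPoolHeadA () returns the address the pool code should use
// so that the chunk sits right next to a Guard page; when the chunk is freed,
// AdjustPoolHeadF () recovers the page base address to free.
//
//    EFI_PHYSICAL_ADDRESS  Memory;      // page-aligned pages backing the pool chunk
//    UINTN                 NoPages;     // number of pages allocated
//    UINTN                 Size;        // requested size plus pool head/tail overhead
//    VOID                  *PoolHead;
//    VOID                  *PageBase;
//
//    PoolHead = AdjustPoolHeadA (Memory, NoPages, Size);
//    // ... hand PoolHead to the pool bookkeeping code ...
//    PageBase = AdjustPoolHeadF ((EFI_PHYSICAL_ADDRESS)(UINTN)PoolHead);
//    // ... free the NoPages pages starting at PageBase ...
//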

/**
  Check to see if the heap guard is enabled for page and/or pool allocation.

  @param[in]  GuardType   Specify the sub-type(s) of Heap Guard.

  @return TRUE if the specified sub-type(s) of Heap Guard are enabled; FALSE otherwise.
**/
BOOLEAN
IsHeapGuardEnabled (
  UINT8           GuardType
  );
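
//
// Typical usage (illustrative): the GUARD_HEAP_TYPE_* bits defined earlier in
// this file select which sub-feature(s) to query.
//
//    if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_PAGE | GUARD_HEAP_TYPE_POOL)) {
//      // Page guard and/or pool guard is enabled.
//    }
//
//    if (IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
//      // Freed-memory (use-after-free) guard is enabled.
//    }
//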

/**
  Notify function used to set all Guard pages after CPU Arch Protocol installed.
**/
VOID
HeapGuardCpuArchProtocolNotify (
  VOID
  );

/**
  This function checks to see if the given memory map descriptor in a memory map
  can be merged with any guarded free pages.

  @param  MemoryMapEntry    A pointer to a descriptor in MemoryMap.
  @param  MaxAddress        Maximum address to stop the merge.

  @return VOID

**/
VOID
MergeGuardPages (
  IN EFI_MEMORY_DESCRIPTOR      *MemoryMapEntry,
  IN EFI_PHYSICAL_ADDRESS       MaxAddress
  );

/**
  Record freed pages as well as mark them as not-present, if enabled.

  @param[in]  BaseAddress   Base address of just freed pages.
  @param[in]  Pages         Number of freed pages.

  @return VOID.
**/
VOID
EFIAPI
GuardFreedPagesChecked (
  IN  EFI_PHYSICAL_ADDRESS    BaseAddress,
  IN  UINTN                   Pages
  );

/**
  Put part of the guarded free pages (at most 64 pages at a time) back into the
  free page pool.

  The freed-memory guard is used to detect Use-After-Free (UAF) issues.  It
  takes a 'use once, then throw away' approach to catch illegal accesses to
  freed memory: the thrown-away memory is marked as not-present, so that any
  access to it after the free is caught by a page-fault exception.

  The problem is that this consumes a lot of memory space.  Once there is no
  memory left in the pool to allocate, part of the freed pages has to be
  restored to normal use; otherwise the whole system would stop functioning.

  @param[out] StartAddress    Start address of promoted memory.
  @param[out] EndAddress      End address of promoted memory.

  @return TRUE    Succeeded to promote memory.
  @return FALSE   No free memory found.

**/
BOOLEAN
PromoteGuardedFreePages (
  OUT EFI_PHYSICAL_ADDRESS      *StartAddress,
  OUT EFI_PHYSICAL_ADDRESS      *EndAddress
  );
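
//
// Illustrative sketch (not the actual allocator code) of the intended use:
// when the free-page search comes up empty and the freed-memory guard is
// active, promote a batch of guarded free pages and retry.  FoundFreePages is
// a hypothetical flag standing in for the real search result.
//
//    EFI_PHYSICAL_ADDRESS  StartAddress;
//    EFI_PHYSICAL_ADDRESS  EndAddress;
//
//    if (!FoundFreePages && IsHeapGuardEnabled (GUARD_HEAP_TYPE_FREED)) {
//      if (PromoteGuardedFreePages (&StartAddress, &EndAddress)) {
//        // The promoted range is usable again; retry the allocation.
//      }
//    }
//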

extern BOOLEAN mOnGuarding;

#endif