path: root/arch/x86/kernel/efi_64.c
/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Bibo Mao <bibo.mao@intel.com>
 *	Chandramouli Narayanan <mouli@linux.intel.com>
 *	Huang Ying <ying.huang@intel.com>
 *
 * Code to convert the EFI memory map to an E820 map has been implemented in
 * the elilo bootloader, based on an EFI patch by Edgar Hucek. Based on the
 * E820 map, the page table is set up appropriately for EFI runtime code.
 * - mouli 06/14/2007.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h>
#include <asm/efi.h>

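/* PGD entry and IRQ flags saved across a physical-mode EFI call. */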
static pgd_t save_pgd __initdata;
static unsigned long efi_flags __initdata;

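/* Handle the "noefi" command line parameter: disable EFI support. */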
static int __init setup_noefi(char *arg)
{
	efi_enabled = 0;
	return 0;
}
early_param("noefi", setup_noefi);

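/*
 * Set or clear the NX bit on the kernel direct-mapping PTEs that cover the
 * physical range [start, end), stepping by PMD_SIZE for huge mappings and
 * PAGE_SIZE otherwise.
 */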
static void __init early_mapping_set_exec(unsigned long start,
					  unsigned long end,
					  int executable)
{
	pte_t *kpte;
	int level;

	while (start < end) {
		kpte = lookup_address((unsigned long)__va(start), &level);
		BUG_ON(!kpte);
		if (executable)
			set_pte(kpte, pte_mkexec(*kpte));
		else
			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) &
					    __supported_pte_mask));
		if (pte_huge(*kpte))
			start = (start + PMD_SIZE) & PMD_MASK;
		else
			start = (start + PAGE_SIZE) & PAGE_MASK;
	}
}

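/*
 * Walk the EFI memory map and update execute permission on every
 * EFI_RUNTIME_SERVICES_CODE region in the kernel direct mapping.
 */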
static void __init early_runtime_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	/* Toggle execute permission on the EFI runtime service code area */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
			unsigned long end;
			end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
			early_mapping_set_exec(md->phys_addr, end, executable);
		}
	}
}

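/*
 * Prepare for an EFI runtime services call in physical mode: disable
 * interrupts, make the runtime code executable and temporarily point PGD
 * entry 0 at the kernel direct mapping so low virtual addresses mirror
 * physical addresses during the call.
 */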
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	local_irq_save(efi_flags);
	early_runtime_code_mapping_set_exec(1);
	vaddress = (unsigned long)__va(0x0UL);
	pgd_val(save_pgd) = pgd_val(*pgd_offset_k(0x0UL));
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}

void __init efi_call_phys_epilog(void)
{
	/*
	 * Restore the original page table and make the runtime code
	 * non-executable again now that the physical-mode EFI call has
	 * returned.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	early_runtime_code_mapping_set_exec(0);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
}

/*
 * We need to map the EFI memory map again after init_memory_mapping().
 */
void __init efi_map_memmap(void)
{
	memmap.map = __va(memmap.phys_map);
	memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
}

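/* Reserve the EFI memory map so the bootmem allocator does not hand it out. */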
void __init efi_reserve_bootmem(void)
{
	reserve_bootmem_generic((unsigned long)memmap.phys_map,
				memmap.nr_map * memmap.desc_size);
}

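/*
 * Make the virtually mapped EFI runtime service code pages executable by
 * switching their attributes to PAGE_KERNEL_EXEC.
 */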
void __init runtime_code_page_mkexec(void)
{
	efi_memory_desc_t *md;
	void *p;

	/* Make EFI runtime service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE)
			change_page_attr_addr(md->virt_addr,
					      md->num_pages,
					      PAGE_KERNEL_EXEC);
	}
	__flush_tlb_all();
}

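/*
 * Map 'size' bytes of EFI memory at physical address 'offset' into the
 * dedicated EFI fixmap slots with caching disabled.  Returns the virtual
 * address of the first mapped page (any sub-page offset is discarded), or
 * NULL if more than MAX_EFI_IO_PAGES pages would be needed in total.
 */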
void __iomem * __init efi_ioremap(unsigned long offset,
				  unsigned long size)
{
	static unsigned pages_mapped;
	unsigned long last_addr;
	unsigned i, pages;

	last_addr = offset + size - 1;
	offset &= PAGE_MASK;
	pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
	if (pages_mapped + pages > MAX_EFI_IO_PAGES)
		return NULL;

	for (i = 0; i < pages; i++) {
		set_fixmap_nocache(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
				   offset);
		offset += PAGE_SIZE;
		pages_mapped++;
	}

	return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE -
					     (pages_mapped - pages));
}