Diffstat (limited to 'arch/x86/platform')
 arch/x86/platform/Makefile                   |   1
 arch/x86/platform/efi/efi_stub_64.S          | 161
 arch/x86/platform/efi/efi_thunk_64.S         | 121
 arch/x86/platform/intel-mid/intel-mid.c      |   2
 arch/x86/platform/intel-quark/Makefile       |   2
 arch/x86/platform/intel-quark/imr.c          | 661
 arch/x86/platform/intel-quark/imr_selftest.c | 129
 7 files changed, 898 insertions(+), 179 deletions(-)
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 85afde1fa3e5..a62e0be3a2f1 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -5,6 +5,7 @@ obj-y	+= geode/
 obj-y	+= goldfish/
 obj-y	+= iris/
 obj-y	+= intel-mid/
+obj-y	+= intel-quark/
 obj-y	+= olpc/
 obj-y	+= scx200/
 obj-y	+= sfi/
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 5fcda7272550..86d0f9e08dd9 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -91,167 +91,6 @@ ENTRY(efi_call)
 	ret
 ENDPROC(efi_call)
 
-#ifdef CONFIG_EFI_MIXED
-
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-ENTRY(__efi64_thunk)
-	movl	%ds, %eax
-	push	%rax
-	movl	%es, %eax
-	push	%rax
-	movl	%ss, %eax
-	push	%rax
-
-	subq	$32, %rsp
-	movl	%esi, 0x0(%rsp)
-	movl	%edx, 0x4(%rsp)
-	movl	%ecx, 0x8(%rsp)
-	movq	%r8, %rsi
-	movl	%esi, 0xc(%rsp)
-	movq	%r9, %rsi
-	movl	%esi, 0x10(%rsp)
-
-	sgdt	save_gdt(%rip)
-
-	leaq	1f(%rip), %rbx
-	movq	%rbx, func_rt_ptr(%rip)
-
-	/* Switch to gdt with 32-bit segments */
-	movl	64(%rsp), %eax
-	lgdt	(%rax)
-
-	leaq	efi_enter32(%rip), %rax
-	pushq	$__KERNEL_CS
-	pushq	%rax
-	lretq
-
-1:	addq	$32, %rsp
-
-	lgdt	save_gdt(%rip)
-
-	pop	%rbx
-	movl	%ebx, %ss
-	pop	%rbx
-	movl	%ebx, %es
-	pop	%rbx
-	movl	%ebx, %ds
-
-	/*
-	 * Convert 32-bit status code into 64-bit.
-	 */
-	test	%rax, %rax
-	jz	1f
-	movl	%eax, %ecx
-	andl	$0x0fffffff, %ecx
-	andl	$0xf0000000, %eax
-	shl	$32, %rax
-	or	%rcx, %rax
-1:
-	ret
-ENDPROC(__efi64_thunk)
-
-ENTRY(efi_exit32)
-	movq	func_rt_ptr(%rip), %rax
-	push	%rax
-	mov	%rdi, %rax
-	ret
-ENDPROC(efi_exit32)
-
-	.code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-ENTRY(efi_enter32)
-	movl	$__KERNEL_DS, %eax
-	movl	%eax, %ds
-	movl	%eax, %es
-	movl	%eax, %ss
-
-	/* Reload pgtables */
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/* Disable long mode via EFER */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btrl	$_EFER_LME, %eax
-	wrmsr
-
-	call	*%edi
-
-	/* We must preserve return value */
-	movl	%eax, %edi
-
-	/*
-	 * Some firmware will return with interrupts enabled. Be sure to
-	 * disable them before we switch GDTs.
-	 */
-	cli
-
-	movl	68(%esp), %eax
-	movl	%eax, 2(%eax)
-	lgdtl	(%eax)
-
-	movl	%cr4, %eax
-	btsl	$(X86_CR4_PAE_BIT), %eax
-	movl	%eax, %cr4
-
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btsl	$_EFER_LME, %eax
-	wrmsr
-
-	xorl	%eax, %eax
-	lldt	%ax
-
-	movl	72(%esp), %eax
-	pushl	$__KERNEL_CS
-	pushl	%eax
-
-	/* Enable paging */
-	movl	%cr0, %eax
-	btsl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-	lret
-ENDPROC(efi_enter32)
-
-	.data
-	.balign	8
-	.global	efi32_boot_gdt
-efi32_boot_gdt:	.word	0
-		.quad	0
-
-save_gdt:	.word	0
-		.quad	0
-func_rt_ptr:	.quad	0
-
-	.global	efi_gdt64
-efi_gdt64:
-	.word	efi_gdt64_end - efi_gdt64
-	.long	0			/* Filled out by user */
-	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
-	.quad	0x0080890000000000	/* TS descriptor */
-	.quad	0x0000000000000000	/* TS continued */
-efi_gdt64_end:
-#endif /* CONFIG_EFI_MIXED */
-
 	.data
 ENTRY(efi_scratch)
 	.fill 3,8,0
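For readers tracing the removed code: the status fix-up at the tail of the deleted __efi64_thunk (which reappears in efi_thunk_64.S below) is easier to follow in C. A minimal sketch of the same packing, not part of the patch, assuming the usual convention that the error bits of a 32-bit EFI_STATUS sit in the top nibble:

	#include <stdint.h>

	/*
	 * Widen a 32-bit EFI_STATUS to its 64-bit representation, mirroring
	 * the asm above: keep the low 28 bits in place, move the top nibble
	 * to the top of the 64-bit word, and pass zero (EFI_SUCCESS) through.
	 */
	static uint64_t efi64_status(uint32_t status32)
	{
		if (status32 == 0)
			return 0;

		return ((uint64_t)(status32 & 0xf0000000) << 32) |
		       (status32 & 0x0fffffff);
	}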
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 8806fa73e6e6..ff85d28c50f2 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -1,9 +1,26 @@
 /*
  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
  */
 
 #include <linux/linkage.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
 	.text
 	.code64
@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
 	leaq	efi_exit32(%rip), %rbx
 	subq	%rax, %rbx
 	movl	%ebx, 8(%rsp)
-	leaq	efi_gdt64(%rip), %rbx
-	subq	%rax, %rbx
-	movl	%ebx, 2(%ebx)
-	movl	%ebx, 4(%rsp)
-	leaq	efi_gdt32(%rip), %rbx
-	subq	%rax, %rbx
-	movl	%ebx, 2(%ebx)
-	movl	%ebx, (%rsp)
 
 	leaq	__efi64_thunk(%rip), %rbx
 	subq	%rax, %rbx
@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
 	retq
 ENDPROC(efi64_thunk)
 
-	.data
-efi_gdt32:
-	.word	efi_gdt32_end - efi_gdt32
-	.long	0			/* Filled out above */
-	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00cf9a000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
-efi_gdt32_end:
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+	movl	%ds, %eax
+	push	%rax
+	movl	%es, %eax
+	push	%rax
+	movl	%ss, %eax
+	push	%rax
+
+	subq	$32, %rsp
+	movl	%esi, 0x0(%rsp)
+	movl	%edx, 0x4(%rsp)
+	movl	%ecx, 0x8(%rsp)
+	movq	%r8, %rsi
+	movl	%esi, 0xc(%rsp)
+	movq	%r9, %rsi
+	movl	%esi, 0x10(%rsp)
+
+	leaq	1f(%rip), %rbx
+	movq	%rbx, func_rt_ptr(%rip)
+
+	/* Switch to 32-bit descriptor */
+	pushq	$__KERNEL32_CS
+	leaq	efi_enter32(%rip), %rax
+	pushq	%rax
+	lretq
+
+1:	addq	$32, %rsp
+
+	pop	%rbx
+	movl	%ebx, %ss
+	pop	%rbx
+	movl	%ebx, %es
+	pop	%rbx
+	movl	%ebx, %ds
+	/*
+	 * Convert 32-bit status code into 64-bit.
+	 */
+	test	%rax, %rax
+	jz	1f
+	movl	%eax, %ecx
+	andl	$0x0fffffff, %ecx
+	andl	$0xf0000000, %eax
+	shl	$32, %rax
+	or	%rcx, %rax
+1:
+	ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+	movq	func_rt_ptr(%rip), %rax
+	push	%rax
+	mov	%rdi, %rax
+	ret
+ENDPROC(efi_exit32)
+
+	.code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %ss
+
+	call	*%edi
+
+	/* We must preserve return value */
+	movl	%eax, %edi
+
+	movl	72(%esp), %eax
+	pushl	$__KERNEL_CS
+	pushl	%eax
+
+	lret
+ENDPROC(efi_enter32)
+
+	.data
+	.balign	8
+func_rt_ptr:	.quad	0
 efi_saved_sp:	.quad	0
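The rewritten __efi64_thunk spills its five service arguments into 32-bit stack slots before far-returning into efi_enter32, so the firmware sees an ordinary 32-bit cdecl frame. A sketch of that frame as a C struct, illustrative only and not part of the patch:

	#include <stdint.h>

	/*
	 * The 32 bytes reserved by "subq $32, %rsp" above, as the 32-bit
	 * callee sees them: each 64-bit argument register is truncated to
	 * 32 bits and stored where a cdecl callee expects its arguments.
	 */
	struct efi32_call_frame {
		uint32_t arg1;	/* 0x00(%rsp) <- %esi */
		uint32_t arg2;	/* 0x04(%rsp) <- %edx */
		uint32_t arg3;	/* 0x08(%rsp) <- %ecx */
		uint32_t arg4;	/* 0x0c(%rsp) <- %r8d */
		uint32_t arg5;	/* 0x10(%rsp) <- %r9d */
	};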
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 1bbedc4b0f88..3005f0c89f2e 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void)
 		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
 	else {
 		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-		pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
 	}
 
 out:
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644
index 000000000000..9cc57ed36022
--- /dev/null
+++ b/arch/x86/platform/intel-quark/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IMR)		+= imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644
index 000000000000..0ee619f9fcb7
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -0,0 +1,661 @@
+/**
+ * imr.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not, an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF, the
+ * system will reset and system BIOS will print out an error message to
+ * inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct imr_device {
+	struct dentry	*file;
+	bool		init;
+	struct mutex	lock;
+	int		max_imr;
+	int		reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ *	31	Lock bit
+ *	30:24	Reserved
+ *	23:2	1 KiB aligned lo address
+ *	1:0	Reserved
+ *
+ * addr_hi
+ *	31:24	Reserved
+ *	23:2	1 KiB aligned hi address
+ *	1:0	Reserved
+ */
+#define IMR_LOCK	BIT(31)
+
+struct imr_regs {
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 rmask;
+	u32 wmask;
+};
+
+#define IMR_NUM_REGS	(sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT	8
+#define imr_to_phys(x)	((x) << IMR_SHIFT)
+#define phys_to_imr(x)	((x) >> IMR_SHIFT)
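A quick worked example of the packing macros, with made-up addresses: IMR_SHIFT is 8, so a register value is the 1 KiB aligned physical address shifted down by eight bits (the datasheet's address field in bits 23:2). A standalone sketch, not part of the patch:

	/* Illustrative round-trip through the packing described above. */
	#define IMR_SHIFT	8
	#define imr_to_phys(x)	((x) << IMR_SHIFT)
	#define phys_to_imr(x)	((x) >> IMR_SHIFT)

	/* A 1 KiB aligned address such as 0x00040000 packs to 0x00000400... */
	_Static_assert(phys_to_imr(0x00040000) == 0x00000400, "pack");
	/* ...and unpacks back to the same physical address. */
	_Static_assert(imr_to_phys(0x00000400) == 0x00040000, "unpack");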
+/**
+ * imr_is_enabled - true if an IMR is enabled false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR set with an address range set to zero and a read/write
+ * access mask set to all is considered to be disabled. An IMR in any
+ * other state - for example set to zero but without read/write access
+ * all is considered to be enabled. This definition of disabled is how
+ * firmware switches off an IMR and is maintained in kernel for
+ * consistency.
+ *
+ * @imr:	pointer to IMR descriptor.
+ * @return:	true if IMR enabled false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+	return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+		 imr->wmask == IMR_WRITE_ACCESS_ALL &&
+		 imr_to_phys(imr->addr_lo) == 0 &&
+		 imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @imr_id:	IMR entry to read.
+ * @imr:	IMR structure representing address and access masks.
+ * @return:	0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+	int ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+			    reg++, &imr->addr_lo);
+	if (ret)
+		return ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+			    reg++, &imr->addr_hi);
+	if (ret)
+		return ret;
+
+	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+			    reg++, &imr->rmask);
+	if (ret)
+		return ret;
+
+	return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+			     reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @imr_id:	IMR entry to write.
+ * @imr:	IMR structure representing address and access masks.
+ * @lock:	indicates if the IMR lock bit should be applied.
+ * @return:	0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id,
+		     struct imr_regs *imr, bool lock)
+{
+	unsigned long flags;
+	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+	int ret;
+
+	local_irq_save(flags);
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
+			     imr->addr_lo);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+			     reg++, imr->addr_hi);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+			     reg++, imr->rmask);
+	if (ret)
+		goto failed;
+
+	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+			     reg++, imr->wmask);
+	if (ret)
+		goto failed;
+
+	/* Lock bit must be set separately to addr_lo address bits. */
+	if (lock) {
+		imr->addr_lo |= IMR_LOCK;
+		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+				     reg - IMR_NUM_REGS, imr->addr_lo);
+		if (ret)
+			goto failed;
+	}
+
+	local_irq_restore(flags);
+	return 0;
+failed:
+	/*
+	 * If writing to the IOSF failed then we're in an unknown state,
+	 * likely a very bad state. An IMR in an invalid state will almost
+	 * certainly lead to a memory access violation.
+	 */
+	local_irq_restore(flags);
+	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+	     imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+	return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s:		pointer to seq_file for output.
+ * @unused:	unused parameter.
+ * @return:	0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+	phys_addr_t base;
+	phys_addr_t end;
+	int i;
+	struct imr_device *idev = s->private;
+	struct imr_regs imr;
+	size_t size;
+	int ret = -ENODEV;
+
+	mutex_lock(&idev->lock);
+
+	for (i = 0; i < idev->max_imr; i++) {
+
+		ret = imr_read(idev, i, &imr);
+		if (ret)
+			break;
+
+		/*
+		 * Remember to add IMR_ALIGN bytes to size to indicate the
+		 * inherent IMR_ALIGN size bytes contained in the masked away
+		 * lower ten bits.
+		 */
+		if (imr_is_enabled(&imr)) {
+			base = imr_to_phys(imr.addr_lo);
+			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+		} else {
+			base = 0;
+			end = 0;
+		}
+		size = end - base;
+		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+			   &base, &end, size, imr.rmask, imr.wmask,
+			   imr_is_enabled(&imr) ? "enabled " : "disabled",
+			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+	}
+
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+
+/**
+ * imr_state_open - debugfs open callback.
+ *
+ * @inode:	pointer to struct inode.
+ * @file:	pointer to struct file.
+ * @return:	result of single open.
+ */
+static int imr_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, imr_dbgfs_state_show, inode->i_private);
+}
+
+static const struct file_operations imr_state_ops = {
+	.open		= imr_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:	0 on success - errno on failure.
+ */
+static int imr_debugfs_register(struct imr_device *idev)
+{
+	idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
+					 idev, &imr_state_ops);
+	return PTR_ERR_OR_ZERO(idev->file);
+}
+
+/**
+ * imr_debugfs_unregister - unregister debugfs hooks.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:
+ */
+static void imr_debugfs_unregister(struct imr_device *idev)
+{
+	debugfs_remove(idev->file);
+}
+
+/**
+ * imr_check_params - check passed address range IMR alignment and non-zero size
+ *
+ * @base:	base address of intended IMR.
+ * @size:	size of intended IMR.
+ * @return:	zero on valid range -EINVAL on unaligned base/size.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+	if ((base & IMR_MASK) || (size & IMR_MASK)) {
+		pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+			&base, size);
+		return -EINVAL;
+	}
+	if (size == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size:	input size bytes.
+ * @return:	reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+	return size - IMR_ALIGN;
+}
+
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr:	address to check against an existing IMR.
+ * @imr:	imr being checked.
+ * @return:	true for overlap false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+	return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
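The IMR_ALIGN slack handled by imr_raw_size() and the "+ IMR_MASK" in the debugfs dump is subtle: addr_hi names the last 1 KiB chunk of a region, not its exclusive end. A worked example with an illustrative 4 KiB region at 0x40000, assuming asm/imr.h (not shown in this patch) defines IMR_ALIGN as 0x400 and IMR_MASK as IMR_ALIGN - 1:

	/* Assumed values for the asm/imr.h constants. */
	#define IMR_ALIGN	0x400
	#define IMR_MASK	(IMR_ALIGN - 1)

	/* The raw size stored for a 4 KiB request: 0x1000 - 0x400 = 0xc00... */
	_Static_assert(0x1000 - IMR_ALIGN == 0xc00, "imr_raw_size of 4 KiB");
	/* ...so addr_hi encodes 0x40c00 and protection runs to 0x40fff inclusive. */
	_Static_assert(0x40000 + 0xc00 + IMR_MASK == 0x40fff, "inclusive end");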
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base:	physical base address of region aligned to 1 KiB.
+ * @size:	physical size of region in bytes, must be aligned to 1 KiB.
+ * @rmask:	read access mask.
+ * @wmask:	write access mask.
+ * @lock:	indicates whether or not to permanently lock this region.
+ * @return:	zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+		  unsigned int rmask, unsigned int wmask, bool lock)
+{
+	phys_addr_t end;
+	unsigned int i;
+	struct imr_device *idev = &imr_dev;
+	struct imr_regs imr;
+	size_t raw_size;
+	int reg;
+	int ret;
+
+	if (WARN_ONCE(idev->init == false, "driver not initialized"))
+		return -ENODEV;
+
+	ret = imr_check_params(base, size);
+	if (ret)
+		return ret;
+
+	/* Tweak the size value. */
+	raw_size = imr_raw_size(size);
+	end = base + raw_size;
+
+	/*
+	 * Check for reserved IMR value common to firmware, kernel and grub
+	 * indicating a disabled IMR.
+	 */
+	imr.addr_lo = phys_to_imr(base);
+	imr.addr_hi = phys_to_imr(end);
+	imr.rmask = rmask;
+	imr.wmask = wmask;
+	if (!imr_is_enabled(&imr))
+		return -ENOTSUPP;
+
+	mutex_lock(&idev->lock);
+
+	/*
+	 * Find a free IMR while checking for an existing overlapping range.
+	 * Note there's no restriction in silicon to prevent IMR overlaps.
+	 * For the sake of simplicity and ease in defining/debugging an IMR
+	 * memory map we exclude IMR overlaps.
+	 */
+	reg = -1;
+	for (i = 0; i < idev->max_imr; i++) {
+		ret = imr_read(idev, i, &imr);
+		if (ret)
+			goto failed;
+
+		/* Find overlap @ base or end of requested range. */
+		ret = -EINVAL;
+		if (imr_is_enabled(&imr)) {
+			if (imr_address_overlap(base, &imr))
+				goto failed;
+			if (imr_address_overlap(end, &imr))
+				goto failed;
+		} else {
+			reg = i;
+		}
+	}
+
+	/* Error out if we have no free IMR entries. */
+	if (reg == -1) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+		 reg, &base, &end, raw_size, rmask, wmask);
+
+	/* Enable IMR at specified range and access mask. */
+	imr.addr_lo = phys_to_imr(base);
+	imr.addr_hi = phys_to_imr(end);
+	imr.rmask = rmask;
+	imr.wmask = wmask;
+
+	ret = imr_write(idev, reg, &imr, lock);
+	if (ret < 0) {
+		/*
+		 * In the highly unlikely event iosf_mbi_write failed
+		 * attempt to rollback the IMR setup skipping the trapping
+		 * of further IOSF write failures.
+		 */
+		imr.addr_lo = 0;
+		imr.addr_hi = 0;
+		imr.rmask = IMR_READ_ACCESS_ALL;
+		imr.wmask = IMR_WRITE_ACCESS_ALL;
+		imr_write(idev, reg, &imr, false);
+	}
+failed:
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
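A short usage sketch for the API exported above. The caller and buffer are hypothetical; IMR_CPU and the return conventions are as used elsewhere in this patch:

	/*
	 * Hypothetical caller: give the CPU exclusive access to a 1 KiB
	 * aligned buffer for the duration of some operation. Illustrative
	 * only; not part of the patch.
	 */
	static int example_protect_buffer(phys_addr_t buf, size_t len)
	{
		int ret;

		/* Unlocked, so the region can be torn down again later. */
		ret = imr_add_range(buf, len, IMR_CPU, IMR_CPU, false);
		if (ret < 0)
			return ret;

		/* ... use the buffer ... */

		return imr_remove_range(buf, len);
	}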
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR by its index specified by reg or
+ * by address range specified by base and size respectively. If you specify an
+ * index on its own the base and size parameters are ignored.
+ * __imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored.
+ * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
+ *
+ * @reg:	imr index to remove.
+ * @base:	physical base address of region aligned to 1 KiB.
+ * @size:	physical size of region in bytes aligned to 1 KiB.
+ * @return:	-EINVAL on invalid range or out of range id
+ *		-ENODEV if reg is valid but no IMR exists or is locked
+ *		0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+	phys_addr_t end;
+	bool found = false;
+	unsigned int i;
+	struct imr_device *idev = &imr_dev;
+	struct imr_regs imr;
+	size_t raw_size;
+	int ret = 0;
+
+	if (WARN_ONCE(idev->init == false, "driver not initialized"))
+		return -ENODEV;
+
+	/*
+	 * Validate address range if deleting by address, else we are
+	 * deleting by index where base and size will be ignored.
+	 */
+	if (reg == -1) {
+		ret = imr_check_params(base, size);
+		if (ret)
+			return ret;
+	}
+
+	/* Tweak the size value. */
+	raw_size = imr_raw_size(size);
+	end = base + raw_size;
+
+	mutex_lock(&idev->lock);
+
+	if (reg >= 0) {
+		/* If a specific IMR is given try to use it. */
+		ret = imr_read(idev, reg, &imr);
+		if (ret)
+			goto failed;
+
+		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+			ret = -ENODEV;
+			goto failed;
+		}
+		found = true;
+	} else {
+		/* Search for match based on address range. */
+		for (i = 0; i < idev->max_imr; i++) {
+			ret = imr_read(idev, i, &imr);
+			if (ret)
+				goto failed;
+
+			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+				continue;
+
+			if ((imr_to_phys(imr.addr_lo) == base) &&
+			    (imr_to_phys(imr.addr_hi) == end)) {
+				found = true;
+				reg = i;
+				break;
+			}
+		}
+	}
+
+	if (!found) {
+		ret = -ENODEV;
+		goto failed;
+	}
+
+	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+	/* Tear down the IMR. */
+	imr.addr_lo = 0;
+	imr.addr_hi = 0;
+	imr.rmask = IMR_READ_ACCESS_ALL;
+	imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+	ret = imr_write(idev, reg, &imr, false);
+
+failed:
+	mutex_unlock(&idev->lock);
+	return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base:	physical base address of region aligned to 1 KiB.
+ * @size:	physical size of region in bytes aligned to 1 KiB.
+ * @return:	-EINVAL on invalid range or out of range id
+ *		-ENODEV if reg is valid but no IMR exists or is locked
+ *		0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+	return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by its index. Useful for
+ * initial sanitization of the IMR address map.
+ * imr_clear(reg); delete the IMR at index reg.
+ *
+ * @reg:	imr index to remove.
+ * @return:	-EINVAL on invalid range or out of range id
+ *		-ENODEV if reg is valid but no IMR exists or is locked
+ *		0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+	return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and Grub both setup IMRs around compressed kernel, initrd memory
+ * that need to be removed before the kernel hands out one of the IMR
+ * encased addresses to a downstream DMA agent such as the SD or Ethernet.
+ * IMRs on Galileo are setup to immediately reset the system on violation.
+ * As a result if you're running a root filesystem from SD - you'll need
+ * the boot-time IMRs torn down or you'll find seemingly random resets when
+ * using your filesystem.
+ *
+ * @idev:	pointer to imr_device structure.
+ * @return:
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+	phys_addr_t base = virt_to_phys(&_text);
+	size_t size = virt_to_phys(&__end_rodata) - base;
+	int i;
+	int ret;
+
+	/* Tear down all existing unlocked IMRs. */
+	for (i = 0; i < idev->max_imr; i++)
+		imr_clear(i);
+
+	/*
+	 * Setup a locked IMR around the physical extent of the kernel
+	 * from the beginning of the .text section to the end of the
+	 * .rodata section as one physically contiguous block.
+	 */
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+	if (ret < 0) {
+		pr_err("unable to setup IMR for kernel: (%p - %p)\n",
+			&_text, &__end_rodata);
+	} else {
+		pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
+			size / 1024, &_text, &__end_rodata);
+	}
+
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV for no IMR support 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+	struct imr_device *idev = &imr_dev;
+	int ret;
+
+	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+		return -ENODEV;
+
+	idev->max_imr = QUARK_X1000_IMR_MAX;
+	idev->reg_base = QUARK_X1000_IMR_REGBASE;
+	idev->init = true;
+
+	mutex_init(&idev->lock);
+	ret = imr_debugfs_register(idev);
+	if (ret != 0)
+		pr_warn("debugfs register failed!\n");
+	imr_fixup_memmap(idev);
+	return 0;
+}
+
+/**
+ * imr_exit - exit point for IMR code.
+ *
+ * Deregisters debugfs, leave IMR state as-is.
+ *
+ * return:
+ */
+static void __exit imr_exit(void)
+{
+	imr_debugfs_unregister(&imr_dev);
+}
+
+module_init(imr_init);
+module_exit(imr_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644
index 000000000000..c9a0838890e2
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -0,0 +1,129 @@
+/**
+ * imr_selftest.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping, reserved
+ * addresses and setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/imr.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res:	result code - true if test passed false otherwise.
+ * @fmt:	format string.
+ * ...		variadic argument list.
+ */
+static void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+	va_list vlist;
+
+	/* Print pass/fail. */
+	if (res)
+		pr_info(SELFTEST "pass ");
+	else
+		pr_info(SELFTEST "fail ");
+
+	/* Print variable string. */
+	va_start(vlist, fmt);
+	vprintk(fmt, vlist);
+	va_end(vlist);
+
+	/* Optional warning. */
+	WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Verify IMR self_test with some simple tests to verify overlap,
+ * zero sized allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+	phys_addr_t base = virt_to_phys(&_text);
+	size_t size = virt_to_phys(&__end_rodata) - base;
+	const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+	int ret;
+
+	/* Test zero zero. */
+	ret = imr_add_range(0, 0, 0, 0, false);
+	imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+	/* Test exact overlap. */
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test overlap with base inside of existing. */
+	base += size - IMR_ALIGN;
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test overlap with end inside of existing. */
+	base -= size + IMR_ALIGN * 2;
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+			    IMR_WRITE_ACCESS_ALL, false);
+	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
+	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+	if (ret >= 0) {
+		ret = imr_remove_range(0, IMR_ALIGN);
+		imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+	}
+
+	/* Test 2 KiB works. */
+	size = IMR_ALIGN * 2;
+	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
+			    IMR_WRITE_ACCESS_ALL, false);
+	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+	if (ret >= 0) {
+		ret = imr_remove_range(0, size);
+		imr_self_test_result(ret == 0, "teardown 2KiB\n");
+	}
+}
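The pointer arithmetic in the overlap probes above is terse, so here is the geometry with made-up numbers, taking the existing IMR as [0x100000, 0x2fffff] (size 0x200000) and IMR_ALIGN as 0x400:

	/* Illustrative geometry of the two relative probes; not part of the patch. */
	_Static_assert(0x100000 + 0x200000 - 0x400 == 0x2ffc00,
		       "base-inside probe starts in the last 1 KiB of the region");
	_Static_assert(0x2ffc00 - (0x200000 + 0x400 * 2) == 0xff400,
		       "end-inside probe starts below the region and ends inside it");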
+/**
+ * imr_self_test_init - entry point for IMR self test.
+ *
+ * return: 0 on success.
+ */
+static int __init imr_self_test_init(void)
+{
+	imr_self_test();
+	return 0;
+}
+
+/**
+ * imr_self_test_exit - exit point for IMR code.
+ *
+ * return:
+ */
+static void __exit imr_self_test_exit(void)
+{
+}
+
+module_init(imr_self_test_init);
+module_exit(imr_self_test_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
+MODULE_LICENSE("Dual BSD/GPL");