#ifndef CPU_X86_MSR_H
#define CPU_X86_MSR_H

/* Intel SDM: Table 2-1
 * IA-32 architectural MSRs: Extended Feature Enable Register
 *
 * See also the AMD64 Architecture Programmer's Manual vol. 2 rev. 3.30
 * and/or the device's BKDG.
 */
#define IA32_EFER			0xC0000080
#define  EFER_NXE			(1 << 11)
#define  EFER_LMA			(1 << 10)
#define  EFER_LME			(1 << 8)
#define  EFER_SCE			(1 << 0)

#define TSC_MSR				0x10
#define IA32_PLATFORM_ID		0x17
#define IA32_FEATURE_CONTROL		0x3a
#define  FEATURE_CONTROL_LOCK_BIT	(1 << 0)
#define  FEATURE_ENABLE_VMX		(1 << 2)
#define  SMRR_ENABLE			(1 << 3)
#define  CPUID_VMX			(1 << 5)
#define  CPUID_SMX			(1 << 6)
#define  CPUID_DCA			(1 << 18)
#define  CPUID_AES			(1 << 25)
#define  SGX_GLOBAL_ENABLE		(1 << 18)
#define  PLATFORM_INFO_SET_TDP		(1 << 29)
#define IA32_BIOS_UPDT_TRIG		0x79
#define IA32_BIOS_SIGN_ID		0x8b
#define IA32_MPERF			0xe7
#define IA32_APERF			0xe8
#define IA32_MCG_CAP			0x179
#define  MCG_CTL_P			(1 << 3)
#define  MCA_BANKS_MASK			0xff
#define IA32_PERF_STATUS		0x198
#define IA32_PERF_CTL			0x199
#define IA32_THERM_INTERRUPT		0x19b
#define IA32_MISC_ENABLE		0x1a0
#define  FAST_STRINGS_ENABLE_BIT	(1 << 0)
#define  SPEED_STEP_ENABLE_BIT		(1 << 16)
#define IA32_ENERGY_PERF_BIAS		0x1b0
#define  ENERGY_POLICY_PERFORMANCE	0
#define  ENERGY_POLICY_NORMAL		6
#define  ENERGY_POLICY_POWERSAVE	15
#define IA32_PACKAGE_THERM_INTERRUPT	0x1b2
#define IA32_PLATFORM_DCA_CAP		0x1f8
/* Page attribute type MSR */
#define IA32_PAT			0x277
#define IA32_MC0_CTL			0x400
#define IA32_MC0_STATUS			0x401
#define  MCA_STATUS_HI_VAL		(1UL << (63 - 32))
#define  MCA_STATUS_HI_OVERFLOW		(1UL << (62 - 32))
#define  MCA_STATUS_HI_UC		(1UL << (61 - 32))
#define  MCA_STATUS_HI_EN		(1UL << (60 - 32))
#define  MCA_STATUS_HI_MISCV		(1UL << (59 - 32))
#define  MCA_STATUS_HI_ADDRV		(1UL << (58 - 32))
#define  MCA_STATUS_HI_PCC		(1UL << (57 - 32))
#define  MCA_STATUS_HI_COREID_VAL	(1UL << (56 - 32))
#define  MCA_STATUS_HI_CECC		(1UL << (46 - 32))
#define  MCA_STATUS_HI_UECC		(1UL << (45 - 32))
#define  MCA_STATUS_HI_DEFERRED		(1UL << (44 - 32))
#define  MCA_STATUS_HI_POISON		(1UL << (43 - 32))
#define  MCA_STATUS_HI_SUBLINK		(1UL << (41 - 32))
#define  MCA_STATUS_HI_ERRCOREID_MASK	(0xf << 0)
#define  MCA_STATUS_LO_ERRCODE_EXT_SH	16
#define  MCA_STATUS_LO_ERRCODE_EXT_MASK	(0x3f << MCA_STATUS_LO_ERRCODE_EXT_SH)
#define  MCA_STATUS_LO_ERRCODE_MASK	(0xffff << 0)
#define MC0_ADDR			0x402
#define MC0_MISC			0x403
#define MC0_CTL_MASK			0xC0010044
#define IA32_PM_ENABLE			0x770
#define IA32_HWP_CAPABILITIES		0x771
#define IA32_HWP_REQUEST		0x774
#define IA32_HWP_STATUS			0x777
#define IA32_PQR_ASSOC			0xc8f
/* MSR bits 33:32 (bits 1:0 of the high half) encode slot number 0-3 */
#define IA32_PQR_ASSOC_MASK		(1 << 0 | 1 << 1)
#define IA32_L3_MASK_1			0xc91
#define IA32_L3_MASK_2			0xc92

#ifndef __ASSEMBLER__

#include <types.h>	/* for the fixed-width types (uint16_t) used below */

#if defined(__ROMCC__)

typedef __builtin_msr_t msr_t;

static msr_t rdmsr(unsigned long index)
{
	return __builtin_rdmsr(index);
}

static void wrmsr(unsigned long index, msr_t msr)
{
	__builtin_wrmsr(index, msr.lo, msr.hi);
}

#else

typedef struct msr_struct {
	unsigned int lo;
	unsigned int hi;
} msr_t;

typedef struct msrinit_struct {
	unsigned int index;
	msr_t msr;
} msrinit_t;

#if CONFIG(SOC_SETS_MSRS)
msr_t soc_msr_read(unsigned int index);
void soc_msr_write(unsigned int index, msr_t msr);

/* Route MSR accesses in the rest of the source code through the SoC hooks */
static __always_inline msr_t rdmsr(unsigned int index)
{
	return soc_msr_read(index);
}

static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	soc_msr_write(index, msr);
}
#else /* CONFIG_SOC_SETS_MSRS */

/* The following functions require __always_inline because the AMD
 * function STOP_CAR_AND_CPU disables cache-as-RAM, after which the
 * cache-as-RAM stack can no longer be used. Called functions must
 * therefore be inlined to avoid stack usage, and the compiler must
 * keep local variables in registers rather than allocating them on
 * the stack. With gcc 4.5.0, some functions declared as inline were
 * not being inlined, so the __always_inline qualifier is added to
 * force inlining.
 */
static __always_inline msr_t rdmsr(unsigned int index)
{
	msr_t result;
	__asm__ __volatile__ (
		"rdmsr"
		: "=a" (result.lo), "=d" (result.hi)
		: "c" (index)
	);
	return result;
}

static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	__asm__ __volatile__ (
		"wrmsr"
		: /* No outputs */
		: "c" (index), "a" (msr.lo), "d" (msr.hi)
	);
}

#endif /* CONFIG_SOC_SETS_MSRS */
#endif /* __ROMCC__ */
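/*
 * Usage sketch (illustrative only, not part of this header's API): most
 * MSR updates are read-modify-write sequences on the lo/hi halves of
 * msr_t. The hypothetical function below enables fast string operations
 * via IA32_MISC_ENABLE:
 *
 *	static void enable_fast_strings(void)
 *	{
 *		msr_t msr = rdmsr(IA32_MISC_ENABLE);
 *
 *		msr.lo |= FAST_STRINGS_ENABLE_BIT;
 *		wrmsr(IA32_MISC_ENABLE, msr);
 *	}
 */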
/* Helpers for interpreting MC[i]_STATUS */

static inline int mca_valid(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_VAL);
}

static inline int mca_over(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_OVERFLOW);
}

static inline int mca_uc(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_UC);
}

static inline int mca_en(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_EN);
}

static inline int mca_miscv(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_MISCV);
}

static inline int mca_addrv(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_ADDRV);
}

static inline int mca_pcc(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_PCC);
}

static inline int mca_idv(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_COREID_VAL);
}

static inline int mca_cecc(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_CECC);
}

static inline int mca_uecc(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_UECC);
}

static inline int mca_defd(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_DEFERRED);
}

static inline int mca_poison(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_POISON);
}

static inline int mca_sublink(msr_t msr)
{
	return !!(msr.hi & MCA_STATUS_HI_SUBLINK);
}

static inline uint16_t mca_err_code(msr_t reg)
{
	return reg.lo & MCA_STATUS_LO_ERRCODE_MASK;
}

static inline uint16_t mca_err_extcode(msr_t reg)
{
	/* Shift the extended error code down so it fits in 16 bits */
	return (reg.lo & MCA_STATUS_LO_ERRCODE_EXT_MASK)
		>> MCA_STATUS_LO_ERRCODE_EXT_SH;
}
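/*
 * Decoding sketch (illustrative only, not part of this header's API):
 * a machine check handler can size the bank array from IA32_MCG_CAP and
 * step through the per-bank status registers, which are spaced four
 * MSRs apart starting at IA32_MC0_STATUS. The function and its
 * log_mca_error() callee below are hypothetical.
 *
 *	static void scan_mca_banks(void)
 *	{
 *		msr_t cap = rdmsr(IA32_MCG_CAP);
 *		unsigned int num_banks = cap.lo & MCA_BANKS_MASK;
 *		unsigned int i;
 *
 *		for (i = 0; i < num_banks; i++) {
 *			msr_t status = rdmsr(IA32_MC0_STATUS + (i * 4));
 *			if (mca_valid(status))
 *				log_mca_error(i, mca_err_code(status));
 *		}
 *	}
 */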
/* Machine Check errors may be categorized by type, as determined by the
 * Error Code field of MC[i]_STATUS. The definitions below can typically
 * be found by searching the BKDG for a table called "Error Code Types".
 */

/* TLB Errors		0000 0000 0001 TTLL */
#define MCA_ERRCODE_TLB_DETECT		0xfff0
#define MCA_ERRCODE_TLB_TT_SH		2 /* Transaction Type */
#define MCA_ERRCODE_TLB_TT_MASK		(0x3 << MCA_ERRCODE_TLB_TT_SH)
#define MCA_ERRCODE_TLB_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_TLB_LL_MASK		(0x3 << MCA_ERRCODE_TLB_LL_SH)

/* Memory Errors	0000 0001 RRRR TTLL */
#define MCA_ERRCODE_MEM_DETECT		0xff00
#define MCA_ERRCODE_MEM_RRRR_SH		4 /* Memory Transaction Type */
#define MCA_ERRCODE_MEM_RRRR_MASK	(0xf << MCA_ERRCODE_MEM_RRRR_SH)
#define MCA_ERRCODE_MEM_TT_SH		2 /* Transaction Type */
#define MCA_ERRCODE_MEM_TT_MASK		(0x3 << MCA_ERRCODE_MEM_TT_SH)
#define MCA_ERRCODE_MEM_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_MEM_LL_MASK		(0x3 << MCA_ERRCODE_MEM_LL_SH)

/* Bus Errors		0000 1PPT RRRR IILL */
#define MCA_ERRCODE_BUS_DETECT		0xf800
#define MCA_ERRCODE_BUS_PP_SH		9 /* Participation Processor */
#define MCA_ERRCODE_BUS_PP_MASK		(0x3 << MCA_ERRCODE_BUS_PP_SH)
#define MCA_ERRCODE_BUS_T_SH		8 /* Timeout */
#define MCA_ERRCODE_BUS_T_MASK		(0x1 << MCA_ERRCODE_BUS_T_SH)
#define MCA_ERRCODE_BUS_RRRR_SH		4 /* Memory Transaction Type */
#define MCA_ERRCODE_BUS_RRRR_MASK	(0xf << MCA_ERRCODE_BUS_RRRR_SH)
#define MCA_ERRCODE_BUS_II_SH		2 /* Memory or I/O */
#define MCA_ERRCODE_BUS_II_MASK		(0x3 << MCA_ERRCODE_BUS_II_SH)
#define MCA_ERRCODE_BUS_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_BUS_LL_MASK		(0x3 << MCA_ERRCODE_BUS_LL_SH)

/* Internal Unclassified Errors	0000 01UU 0000 0000 */
#define MCA_ERRCODE_INT_DETECT		0xfc00
#define MCA_ERRCODE_INT_UU_SH		8 /* Internal Error Type */
#define MCA_ERRCODE_INT_UU_MASK		(0x3 << MCA_ERRCODE_INT_UU_SH)

#define MCA_BANK_LS	0 /* Load-store, including DC */
#define MCA_BANK_IF	1 /* Instruction Fetch, including IC */
#define MCA_BANK_CU	2 /* Combined Unit, including L2 */
/* bank 3 reserved */
#define MCA_BANK_NB	4 /* Northbridge, including IO link */
#define MCA_BANK_EX	5 /* Execution Unit */
#define MCA_BANK_FP	6 /* Floating Point */

enum mca_err_code_types {
	MCA_ERRTYPE_UNKNOWN,
	MCA_ERRTYPE_TLB,
	MCA_ERRTYPE_MEM,
	MCA_ERRTYPE_BUS,
	MCA_ERRTYPE_INT
};

static inline enum mca_err_code_types mca_err_type(msr_t reg)
{
	uint16_t error = mca_err_code(reg);

	/* The detect patterns overlap, so this order must be maintained */
	if (error & MCA_ERRCODE_BUS_DETECT)
		return MCA_ERRTYPE_BUS;
	if (error & MCA_ERRCODE_INT_DETECT)
		return MCA_ERRTYPE_INT;
	if (error & MCA_ERRCODE_MEM_DETECT)
		return MCA_ERRTYPE_MEM;
	if (error & MCA_ERRCODE_TLB_DETECT)
		return MCA_ERRTYPE_TLB;
	return MCA_ERRTYPE_UNKNOWN;
}

/* Helper for setting a single MSR bit, given a bit position 0-63 */
static inline void msr_set_bit(unsigned int reg, unsigned int bit)
{
	msr_t msr = rdmsr(reg);

	if (bit < 32) {
		if (msr.lo & (1u << bit))
			return;
		msr.lo |= 1u << bit;
	} else {
		if (msr.hi & (1u << (bit - 32)))
			return;
		msr.hi |= 1u << (bit - 32);
	}

	wrmsr(reg, msr);
}

#endif /* __ASSEMBLER__ */
#endif /* CPU_X86_MSR_H */
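/*
 * Usage sketch (illustrative only, not part of this header's API):
 * msr_set_bit() and mca_err_type() compose with the definitions above,
 * e.g. setting FEATURE_CONTROL_LOCK_BIT (bit 0) and then triaging a
 * machine check status. handle_memory_error() is a hypothetical callee.
 *
 *	msr_set_bit(IA32_FEATURE_CONTROL, 0);
 *
 *	msr_t status = rdmsr(IA32_MC0_STATUS);
 *	if (mca_valid(status) && mca_err_type(status) == MCA_ERRTYPE_MEM)
 *		handle_memory_error(status);
 */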