author     Samuel Gabrielsson <samuel.gabrielsson@gmail.com>  2011-03-30 10:21:23 -0300
committer  Mauro Carvalho Chehab <mchehab@redhat.com>         2011-10-31 15:10:05 -0200
commit     e8b6a1271035e621d1c4b22403c697b5c969728c
tree       2d7d0f1453da9302749f7c4fe0f60aa50b56954a
parent     ddeb3547d4823495c6604750c241e5a3810f51a3
i7core_edac: Add scrubbing support
Add scrubbing support to i7core_edac, tested on an Intel Xeon L5638.

Signed-off-by: Samuel Gabrielsson <samuel.gabrielsson@gmail.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
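The set/get callbacks added by this patch hook into EDAC's generic scrub rate handling, so the rate should become tunable from userspace through the sdram_scrub_rate sysfs attribute once the controller registers. A minimal userspace sketch of that usage follows; the mc0 path and the ~5 MB/s target are assumptions for illustration, not part of the patch:

/*
 * Standalone example, not part of the patch: request a scrub bandwidth
 * of roughly 5 MB/s through EDAC's sysfs interface. The mc0 index is an
 * assumption; it depends on how many memory controllers the system has.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/devices/system/edac/mc/mc0/sdram_scrub_rate";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* The value is in bytes per second; 0 disables patrol scrubbing. */
	fprintf(f, "%u\n", 5u * 1024 * 1024);
	fclose(f);
	return 0;
}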
Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/i7core_edac.c  126
1 file changed, 126 insertions(+), 0 deletions(-)
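For reference, the SCRUBINTERVAL conversion used by both helpers in the diff below is scrubval = cache_line_size * freq_dclk / bandwidth, truncated to 24 bits, and the reverse division recovers the bandwidth. The following standalone sketch mirrors that arithmetic with 64-bit intermediates; the 800 MHz DCLK and 64-byte cache line match the constants in the patch, while the 5 MB/s request is just an example value:

/* Standalone sketch of the SCRUBINTERVAL round trip; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define SCRUB_INTERVAL_MASK	0x00ffffffu

static const uint64_t cache_line_size = 64;		/* bytes */
static const uint64_t freq_dclk = 800ULL * 1000000;	/* 800 MHz DCLK */

/* Requested bandwidth (bytes/sec, nonzero) -> 24-bit SCRUBINTERVAL value. */
static uint32_t bw_to_scrubval(uint32_t bw)
{
	/* 64-bit multiply avoids overflowing 64 * 800000000. */
	return SCRUB_INTERVAL_MASK & (uint32_t)(cache_line_size * freq_dclk / bw);
}

/* SCRUBINTERVAL value -> bandwidth in bytes/sec (0 means scrubbing is off). */
static uint64_t scrubval_to_bw(uint32_t scrubval)
{
	scrubval &= SCRUB_INTERVAL_MASK;
	if (!scrubval)
		return 0;
	return cache_line_size * freq_dclk / scrubval;
}

int main(void)
{
	uint32_t bw = 5u * 1024 * 1024;		/* ~5 MB/s requested */
	uint32_t reg = bw_to_scrubval(bw);

	printf("requested %u B/s -> SCRUBINTERVAL 0x%06x -> %llu B/s\n",
	       (unsigned)bw, (unsigned)reg,
	       (unsigned long long)scrubval_to_bw(reg));
	return 0;
}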
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index c64ba267902f..0c02486cf57a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -78,6 +78,8 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/* OFFSETS for Device 0 Function 0 */
#define MC_CFG_CONTROL 0x90
+ #define MC_CFG_UNLOCK 0x02
+ #define MC_CFG_LOCK 0x00
/* OFFSETS for Device 3 Function 0 */
@@ -98,6 +100,14 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
#define DIMM0_COR_ERR(r) ((r) & 0x7fff)
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
+#define MC_SSRCONTROL 0x48
+ #define SSR_MODE_DISABLE 0x00
+ #define SSR_MODE_ENABLE 0x01
+ #define SSR_MODE_MASK 0x03
+
+#define MC_SCRUB_CONTROL 0x4c
+ #define STARTSCRUB (1 << 24)
+
#define MC_COR_ECC_CNT_0 0x80
#define MC_COR_ECC_CNT_1 0x84
#define MC_COR_ECC_CNT_2 0x88
@@ -1901,6 +1911,116 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
return 1;
}
+/*
+ * set_sdram_scrub_rate This routine sets the byte/sec bandwidth scrub rate
+ * in hardware according to the SCRUBINTERVAL formula
+ * found in the datasheet.
+ */
+static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ const u32 cache_line_size = 64;
+ const u32 freq_dclk = 800*1000000;
+ u32 dw_scrub;
+ u32 dw_ssr;
+
+ /* Get data from the MC register, function 2 */
+ pdev = pvt->pci_mcr[2];
+ if (!pdev)
+ return -ENODEV;
+
+ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
+
+ if (new_bw == 0) {
+ /* Prepare to disable patrol scrub */
+ dw_scrub &= ~STARTSCRUB;
+ /* Stop the patrol scrub engine */
+ write_and_test(pdev, MC_SCRUB_CONTROL, dw_scrub & ~0x00ffffff);
+
+ /* Get current status of scrub rate and set bit to disable */
+ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
+ dw_ssr &= ~SSR_MODE_MASK;
+ dw_ssr |= SSR_MODE_DISABLE;
+ } else {
+ /*
+ * Translate the desired scrub rate to a register value and
+ * program the corresponding register.
+ */
+ dw_scrub = 0x00ffffff & ((u64)cache_line_size * freq_dclk / new_bw);
+
+ /* Start the patrol scrub engine */
+ pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
+ STARTSCRUB | dw_scrub);
+
+ /* Get current status of scrub rate and set bit to enable */
+ pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
+ dw_ssr &= ~SSR_MODE_MASK;
+ dw_ssr |= SSR_MODE_ENABLE;
+ }
+ /* Disable or enable scrubbing */
+ pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
+
+ return new_bw;
+}
+
+/*
+ * get_sdram_scrub_rate This routine converts the current scrub rate value
+ * into byte/sec bandwidth according to the
+ * SCRUBINTERVAL formula found in the datasheet.
+ */
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ const u32 cache_line_size = 64;
+ const u32 freq_dclk = 800*1000000;
+ u32 scrubval;
+
+ /* Get data from the MC register, function 2 */
+ pdev = pvt->pci_mcr[2];
+ if (!pdev)
+ return -ENODEV;
+
+ /* Get current scrub control data */
+ pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
+
+ /* Mask the highest 8 bits to 0 */
+ scrubval &= 0x00ffffff;
+ if (!scrubval)
+ return 0;
+
+ /* Convert the scrub rate value into byte/sec bandwidth */
+ return 0xffffffff & ((u64)cache_line_size * freq_dclk / scrubval);
+}
+
+static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 pci_lock;
+
+ /* Unlock writes to pci registers */
+ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
+ pci_lock &= ~0x3;
+ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
+ pci_lock | MC_CFG_UNLOCK);
+
+ mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
+ mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
+}
+
+static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 pci_lock;
+
+ /* Lock writes to pci registers */
+ pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
+ pci_lock &= ~0x3;
+ pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
+ pci_lock | MC_CFG_LOCK);
+}
+
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
pvt->i7core_pci = edac_pci_create_generic_ctl(
@@ -1939,6 +2059,9 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &i7core_dev->pdev[0]->dev);
+ /* Disable scrubrate setting */
+ disable_sdram_scrub_setting(mci);
+
/* Disable MCE NMI handler */
edac_mce_unregister(&pvt->edac_mce);
@@ -2012,6 +2135,9 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* Set the function pointer to an actual operation function */
mci->edac_check = i7core_check_error;
+ /* Enable scrubrate setting */
+ enable_sdram_scrub_setting(mci);
+
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
debugf0("MC: " __FILE__