author | Brian Norris <computersforpeace@gmail.com> | 2014-08-19 11:57:23 -0700 |
---|---|---|
committer | Brian Norris <computersforpeace@gmail.com> | 2014-08-19 11:57:23 -0700 |
commit | 5b49ab3e03f68eb49db4bce6290e5707b7f6c6f3 (patch) | |
tree | 090c7c069bc6c0f2b368ed8d0af861c275525411 /arch/x86/kernel/cpu/intel_cacheinfo.c | |
parent | b25046b1e5e3f1423434da77ccc859f2f779d1ce (diff) | |
parent | 54ea17a597b00e46b3720e75dd7595cd5dfa5670 (diff) | |
Merge l2-mtd/next into l2-mtd/master
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c | 16 |
1 file changed, 14 insertions, 2 deletions
```diff
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..c7035073dfc1 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -461,7 +461,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 
 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
 
-	if (strict_strtoul(buf, 10, &val) < 0)
+	if (kstrtoul(buf, 10, &val) < 0)
 		return -EINVAL;
 
 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
@@ -511,7 +511,7 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 	if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 		return -EINVAL;
 
-	if (strict_strtoul(buf, 16, &val) < 0)
+	if (kstrtoul(buf, 16, &val) < 0)
 		return -EINVAL;
 
 	if (amd_set_subcaches(cpu, val))
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
```
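For reference, the first two hunks swap the long-deprecated strict_strtoul() parser for kstrtoul(), which takes the same (string, base, result pointer) arguments and returns 0 on success or a negative errno such as -EINVAL or -ERANGE. Below is a minimal sketch of that pattern in a sysfs store handler; the handler name, the attribute it would back, and the bounds check are hypothetical and not part of this commit.

```c
#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical sysfs store handler illustrating the kstrtoul() pattern used above. */
static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long val;
	int err;

	/* Parse a base-10 unsigned long from the user-supplied buffer. */
	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;	/* propagate -EINVAL/-ERANGE to user space */

	if (val > 3)		/* hypothetical range check */
		return -EINVAL;

	/* ... apply val to the hardware here ... */

	return count;		/* report the whole buffer as consumed */
}
```

Note that the hunks in this commit only test for a negative return (`< 0`) and map any parse failure to -EINVAL, keeping the existing error handling rather than propagating the parser's errno as the sketch does.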