author    Heiko Carstens <heiko.carstens@de.ibm.com>  2011-05-29 12:40:51 +0200
committer Heiko Carstens <heiko.carstens@de.ibm.com>  2011-05-29 12:40:51 +0200
commit    3c5cffb66d8ea94832650fcb55194715b0229088
tree      22872361ef884b527855ebe6bf225eaabbae4ca1 /arch/s390/mm
parent    a43a9d93d40a69eceeb4e4a4c860cc20186d475c
[S390] mm: fix mmu_gather rework
Quite a few functions that get called from the tlb gather code require that preemption be disabled, so disable preemption inside the called functions instead.

The only drawback is that rcu_table_freelist_finish() doesn't necessarily get called on the cpu(s) that filled the free lists, so we may see a delay until we finally see an RCU callback. Over time, however, this shouldn't matter.

With this we get rid of lots of "BUG: using smp_processor_id() in preemptible" messages.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
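For context: the first hunk below swaps the raw __get_cpu_var() accessor for the get_cpu_var()/put_cpu_var() pair, which disables preemption for the duration of the per-cpu access. A minimal sketch of that pattern, using a hypothetical per-cpu pointer demo_list and helper demo_finish() (the rcu_table_freelist type and callback are the ones from the file being patched):

/*
 * Sketch only: get_cpu_var() disables preemption before handing back
 * a reference to this cpu's instance of the variable; put_cpu_var()
 * re-enables it.  __get_cpu_var(), by contrast, assumes the caller
 * has already disabled preemption.  "demo_list" is a hypothetical
 * stand-in for the real rcu_table_freelist per-cpu variable.
 */
static DEFINE_PER_CPU(struct rcu_table_freelist *, demo_list);

static void demo_finish(void)
{
	struct rcu_table_freelist **batchp = &get_cpu_var(demo_list);

	if (*batchp) {
		call_rcu(&(*batchp)->rcu, rcu_table_freelist_callback);
		*batchp = NULL;
	}
	put_cpu_var(demo_list);		/* re-enables preemption */
}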
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/pgtable.c  23
1 file changed, 16 insertions(+), 7 deletions(-)
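The "BUG: using smp_processor_id() in preemptible" splats come from debug_smp_processor_id() (CONFIG_DEBUG_PREEMPT), which warns whenever smp_processor_id() is used while the caller can be preempted, since the task might migrate to another cpu before the returned number is acted on. A hypothetical before/after sketch of the failure mode the hunks below address (helper names are made up for illustration):

/*
 * Hypothetical helpers, for illustration only: with CONFIG_DEBUG_PREEMPT
 * the first version warns because the task may migrate to another cpu
 * right after smp_processor_id() returns.
 */
static bool table_only_used_here_buggy(struct mm_struct *mm)
{
	/* preemptible caller -> "BUG: using smp_processor_id() ..." */
	return cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

static bool table_only_used_here(struct mm_struct *mm)
{
	bool ret;

	preempt_disable();	/* pins the task to the current cpu */
	ret = cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()));
	preempt_enable();
	return ret;
}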
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 14c6fae6fe6b..b09763fe5da1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -71,12 +71,15 @@ static void rcu_table_freelist_callback(struct rcu_head *head)
 
 void rcu_table_freelist_finish(void)
 {
-	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
+	struct rcu_table_freelist *batch = *batchp;
 
 	if (!batch)
-		return;
+		goto out;
 	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	__get_cpu_var(rcu_table_freelist) = NULL;
+	*batchp = NULL;
+out:
+	put_cpu_var(rcu_table_freelist);
 }
 
 static void smp_sync(void *arg)
@@ -141,20 +144,23 @@ void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 {
 	struct rcu_table_freelist *batch;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		crst_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch->table[--batch->crst_index] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 #ifdef CONFIG_64BIT
@@ -323,16 +329,17 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	struct page *page;
 	unsigned long bits;
 
+	preempt_disable();
 	if (atomic_read(&mm->mm_users) < 2 &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	batch = rcu_table_freelist_get(mm);
 	if (!batch) {
 		smp_call_function(smp_sync, NULL, 1);
 		page_table_free(mm, table);
-		return;
+		goto out;
 	}
 	bits = (mm->context.has_pgste) ? 3UL : 1UL;
 	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
@@ -345,6 +352,8 @@ void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
 	batch->table[batch->pgt_index++] = table;
 	if (batch->pgt_index >= batch->crst_index)
 		rcu_table_freelist_finish();
+out:
+	preempt_enable();
 }
 
 /*