author	Jens Axboe <jens.axboe@oracle.com>	2008-06-06 11:18:06 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2008-06-26 11:24:35 +0200
commit	8691e5a8f691cc2a4fda0651e8d307aaba0e7d68 (patch)
tree	6cb6767064d2d43441212566da2d83dcc9a0cd8e /net
parent	490f5de52a87063fcb40e3b22f61b0779603ff6d (diff)
smp_call_function: get rid of the unused nonatomic/retry argument
It's never used and the comments refer to nonatomic and retry interchangeably, so get rid of it.

Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
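For reference, a minimal sketch of the new calling convention after this change. The prototypes are paraphrased from include/linux/smp.h of that era, and the helper names (example_noop, example_kick_all_cpus) are made up for illustration; they are not part of this diff:

#include <linux/smp.h>

/*
 * Old form:  smp_call_function(func, info, nonatomic, wait);
 * New form:  smp_call_function(func, info, wait);
 * The unused nonatomic/retry argument is dropped; only the wait flag remains.
 * smp_call_function_single() loses the same argument.
 */

/* Runs on every other online cpu; does nothing by design. */
static void example_noop(void *info)
{
}

static void example_kick_all_cpus(void)
{
	/* was: smp_call_function(example_noop, NULL, 0, 1); */
	smp_call_function(example_noop, NULL, 1);	/* wait = 1 */
}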
Diffstat (limited to 'net')
-rw-r--r--  net/core/flow.c    2
-rw-r--r--  net/iucv/iucv.c   14
2 files changed, 8 insertions, 8 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 19991175fdeb..5cf81052d044 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -298,7 +298,7 @@ void flow_cache_flush(void)
init_completion(&info.completion);
local_bh_disable();
- smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+ smp_call_function(flow_cache_flush_per_cpu, &info, 0);
flow_cache_flush_tasklet((unsigned long)&info);
local_bh_enable();
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 918970762131..94d5a45c3a57 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -480,7 +480,7 @@ static void iucv_setmask_mp(void)
if (cpu_isset(cpu, iucv_buffer_cpumask) &&
!cpu_isset(cpu, iucv_irq_cpumask))
smp_call_function_single(cpu, iucv_allow_cpu,
- NULL, 0, 1);
+ NULL, 1);
preempt_enable();
}
@@ -498,7 +498,7 @@ static void iucv_setmask_up(void)
cpumask = iucv_irq_cpumask;
cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
for_each_cpu_mask(cpu, cpumask)
- smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
+ smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
/**
@@ -523,7 +523,7 @@ static int iucv_enable(void)
rc = -EIO;
preempt_disable();
for_each_online_cpu(cpu)
- smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
preempt_enable();
if (cpus_empty(iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
@@ -580,7 +580,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
case CPU_ONLINE_FROZEN:
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
@@ -589,10 +589,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
if (cpus_empty(cpumask))
/* Can't offline last IUCV enabled cpu. */
return NOTIFY_BAD;
- smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
+ smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
if (cpus_empty(iucv_irq_cpumask))
smp_call_function_single(first_cpu(iucv_buffer_cpumask),
- iucv_allow_cpu, NULL, 0, 1);
+ iucv_allow_cpu, NULL, 1);
break;
}
return NOTIFY_OK;
@@ -652,7 +652,7 @@ static void iucv_cleanup_queue(void)
* pending interrupts force them to the work queue by calling
* an empty function on all cpus.
*/
- smp_call_function(__iucv_cleanup_queue, NULL, 0, 1);
+ smp_call_function(__iucv_cleanup_queue, NULL, 1);
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
/* Remove stale work items from the task queue. */