commit     c749fa181bd5848be78691d23168ec61ce691b95 (patch)
tree       d037dc016bd880d9d5b393a30f3907ef5e98124d /kernel
parent     16f4faa4f06ff3b4e214922d55ac33ab6e2bdbdc (diff)
parent     3be4aaf4e2d3eb95cce7835e8df797ae65ae5ac1 (diff)
author     David S. Miller <davem@davemloft.net>  2018-04-24 23:59:11 -0400
committer  David S. Miller <davem@davemloft.net>  2018-04-24 23:59:11 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/bpf/core.c               | 45
 -rw-r--r--  kernel/bpf/sockmap.c            |  3
 -rw-r--r--  kernel/events/callchain.c       | 25
 -rw-r--r--  kernel/events/core.c            |  8
 -rw-r--r--  kernel/fork.c                   |  3
 -rw-r--r--  kernel/time/posix-cpu-timers.c  |  4
 -rw-r--r--  kernel/time/tick-oneshot.c      | 11
 -rw-r--r--  kernel/time/timekeeping.c       |  7
 -rw-r--r--  kernel/trace/bpf_trace.c        | 25
 -rw-r--r--  kernel/trace/trace_kprobe.c     |  2
 10 files changed, 76 insertions(+), 57 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d315b393abdd..ba03ec39efb3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
return cnt;
}
+static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+ u32 *prog_ids,
+ u32 request_cnt)
+{
+ int i = 0;
+
+ for (; *prog; prog++) {
+ if (*prog == &dummy_bpf_prog.prog)
+ continue;
+ prog_ids[i] = (*prog)->aux->id;
+ if (++i == request_cnt) {
+ prog++;
+ break;
+ }
+ }
+
+ return !!(*prog);
+}
+
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt)
{
struct bpf_prog **prog;
unsigned long err = 0;
- u32 i = 0, *ids;
bool nospc;
+ u32 *ids;
/* users of this function are doing:
* cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
return -ENOMEM;
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
- for (; *prog; prog++) {
- if (*prog == &dummy_bpf_prog.prog)
- continue;
- ids[i] = (*prog)->aux->id;
- if (++i == cnt) {
- prog++;
- break;
- }
- }
- nospc = !!(*prog);
+ nospc = bpf_prog_array_copy_core(prog, ids, cnt);
rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
}
int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
- __u32 __user *prog_ids, u32 request_cnt,
- __u32 __user *prog_cnt)
+ u32 *prog_ids, u32 request_cnt,
+ u32 *prog_cnt)
{
+ struct bpf_prog **prog;
u32 cnt = 0;
if (array)
cnt = bpf_prog_array_length(array);
- if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
- return -EFAULT;
+ *prog_cnt = cnt;
/* return early if user requested only program count or nothing to copy */
if (!request_cnt || !cnt)
return 0;
- return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+ /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
+ prog = rcu_dereference_check(array, 1)->progs;
+ return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+ : 0;
}
static void bpf_prog_free_deferred(struct work_struct *work)
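Note on the core.c change: it extracts the ID-copy loop into bpf_prog_array_copy_core() so that bpf_prog_array_copy_info() can fill a kernel-side buffer instead of writing to user memory while bpf_event_mutex is held. Below is a minimal userspace sketch of that helper's contract (NULL-terminated array in, up to request_cnt ids out, return value signalling truncation); the struct and names are simplified stand-ins and the dummy-prog filtering is omitted.

#include <stdbool.h>
#include <stdio.h>

struct prog { unsigned int id; };   /* stand-in for struct bpf_prog */

/* Walk a NULL-terminated array, record up to request_cnt ids, and
 * report whether entries were left over (the caller turns true into
 * -ENOSPC). */
static bool copy_core(struct prog **prog, unsigned int *ids,
                      unsigned int request_cnt)
{
        unsigned int i = 0;

        for (; *prog; prog++) {
                ids[i] = (*prog)->id;
                if (++i == request_cnt) {
                        prog++;
                        break;
                }
        }
        return *prog != NULL;
}

int main(void)
{
        struct prog a = { 1 }, b = { 2 }, c = { 3 };
        struct prog *arr[] = { &a, &b, &c, NULL };
        unsigned int ids[2];
        bool truncated = copy_core(arr, ids, 2);

        printf("truncated=%d ids=%u,%u\n", truncated, ids[0], ids[1]);
        return 0;
}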
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 8dd9210d7db7..a3b21385e947 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
- if (attr->value_size > KMALLOC_MAX_SIZE)
- return ERR_PTR(-E2BIG);
-
err = bpf_tcp_ulp_register();
if (err && err != -EEXIST)
return ERR_PTR(err);
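Note on the sockmap.c hunk: it drops a check that had become unreachable, since the preceding test already rejects any value_size other than 4, which can never exceed KMALLOC_MAX_SIZE. A compressed sketch of that control flow, with SIZE_CAP as a hypothetical stand-in for KMALLOC_MAX_SIZE:

#include <errno.h>

#define SIZE_CAP (1u << 22)       /* hypothetical stand-in */

int validate_attrs(unsigned int value_size)
{
        if (value_size != 4)
                return -EINVAL;   /* pins value_size to exactly 4 */
        if (value_size > SIZE_CAP)
                return -E2BIG;    /* dead code: 4 can never exceed the cap */
        return 0;
}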
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 772a43fea825..c187aa3df3c8 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
goto exit;
}
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- /*
- * If requesting per event more than the global cap,
- * return a different error to help userspace figure
- * this out.
- *
- * And also do it here so that we have &callchain_mutex held.
- */
- if (event_max_stack > sysctl_perf_event_max_stack)
- err = -EOVERFLOW;
+ /*
+ * If requesting per event more than the global cap,
+ * return a different error to help userspace figure
+ * this out.
+ *
+ * And also do it here so that we have &callchain_mutex held.
+ */
+ if (event_max_stack > sysctl_perf_event_max_stack) {
+ err = -EOVERFLOW;
goto exit;
}
- err = alloc_callchain_buffers();
+ if (count == 1)
+ err = alloc_callchain_buffers();
exit:
if (err)
atomic_dec(&nr_callchain_events);
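Note on the callchain.c rewrite: the per-event cap check now runs for every caller, first user included, while buffer allocation stays with the caller that brings the user count from zero to one. A simplified userspace sketch of that first-user-allocates pattern, assuming a pthread mutex in place of callchain_mutex and a plain int for the atomic count; teardown is omitted.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cb_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_users;
static void *shared_buffers;
static int global_max_stack = 127;   /* stand-in for the sysctl cap */

int get_buffers(int event_max_stack)
{
        int err = 0;

        pthread_mutex_lock(&cb_mutex);
        nr_users++;
        /* validated for every caller, not only when count > 1 */
        if (event_max_stack > global_max_stack) {
                err = -EOVERFLOW;
                goto exit;
        }
        if (nr_users == 1) {         /* only the first user allocates */
                shared_buffers = malloc(4096);
                if (!shared_buffers)
                        err = -ENOMEM;
        }
exit:
        if (err)
                nr_users--;
        pthread_mutex_unlock(&cb_mutex);
        return err;
}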
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2d5fe26551f8..67612ce359ad 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
},
};
+ if (!sched_in && task->state == TASK_RUNNING)
+ switch_event.event_id.header.misc |=
+ PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
+
perf_iterate_sb(perf_event_switch_output,
&switch_event,
NULL);
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
- ret = -EINVAL;
+ return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
- ret = -EINVAL;
+ return -EINVAL;
}
if (!attr->sample_max_stack)
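Note on events/core.c: two independent fixes land here. perf_copy_attr() now returns on invalid sample_stack_user values instead of setting ret and continuing, and switch-out records gain a preempt tag. The preempt heuristic is small enough to sketch on its own: a task still in TASK_RUNNING state when it is switched out did not block voluntarily. The flag value and struct below are illustrative stand-ins.

#include <stdint.h>

#define TASK_RUNNING            0
#define MISC_SWITCH_OUT_PREEMPT (1 << 14)  /* illustrative flag value */

struct task { long state; };

uint16_t switch_out_misc(const struct task *task, int sched_in)
{
        uint16_t misc = 0;

        /* still runnable at switch-out => preempted, not sleeping */
        if (!sched_in && task->state == TASK_RUNNING)
                misc |= MISC_SWITCH_OUT_PREEMPT;
        return misc;
}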
diff --git a/kernel/fork.c b/kernel/fork.c
index 242c8c93d285..a5d21c42acfc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
-#ifdef CONFIG_DEBUG_KMEMLEAK
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
-#endif
+
tsk->stack_vm_area = s;
return s->addr;
}
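Note on the fork.c hunk: the memset is promoted from a KMEMLEAK-only debug aid to an unconditional step, so a recycled stack never carries data from its previous task. A userspace sketch of the clear-on-reuse pattern, with a single-slot cache standing in for the cached-stacks array:

#include <stdlib.h>
#include <string.h>

#define STACK_SIZE 16384          /* stand-in for THREAD_SIZE */

static void *cached_stack;        /* one-slot cache for illustration */

void *alloc_stack(void)
{
        void *s = cached_stack;

        if (s) {
                cached_stack = NULL;
                /* clear stale pointers left by the previous owner */
                memset(s, 0, STACK_SIZE);
                return s;
        }
        return calloc(1, STACK_SIZE); /* fresh allocation is already zeroed */
}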
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2541bd89f20e..5a6251ac6f7a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 *newval, u64 *oldval)
{
u64 now;
+ int ret;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
+ ret = cpu_timer_sample_group(clock_idx, tsk, &now);
- if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
+ if (oldval && ret != -EINVAL) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update
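Note on posix-cpu-timers.c: the group-time sample is hoisted out of the oldval condition because the sampled time is also consumed later in the function, so taking it only when an old value was requested left it uninitialized on the other path. A compilable sketch of the shape of the fix, with sample_group_time() as a hypothetical stub for cpu_timer_sample_group():

#include <errno.h>

static int sample_group_time(unsigned long long *now)
{
        *now = 12345;             /* stub clock read */
        return 0;
}

void set_timer(unsigned long long *newval, unsigned long long *oldval)
{
        unsigned long long now = 0;
        int ret;

        /* sampled unconditionally: `now` is consumed below even when
         * no old value was requested */
        ret = sample_group_time(&now);
        if (oldval && ret != -EINVAL) {
                /* rewrite *oldval from absolute to relative */
                *oldval = (*oldval > now) ? *oldval - now : 0;
        }
        if (*newval)
                *newval += now;   /* relative -> absolute needs `now` */
}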
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index c1f518e7aa80..6fe615d57ebb 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
!tick_device_is_functional(dev)) {
- printk(KERN_INFO "Clockevents: "
- "could not switch to one-shot mode:");
+ pr_info("Clockevents: could not switch to one-shot mode:");
if (!dev) {
- printk(" no tick device\n");
+ pr_cont(" no tick device\n");
} else {
if (!tick_device_is_functional(dev))
- printk(" %s is not functional.\n", dev->name);
+ pr_cont(" %s is not functional.\n", dev->name);
else
- printk(" %s does not support one-shot mode.\n",
- dev->name);
+ pr_cont(" %s does not support one-shot mode.\n",
+ dev->name);
}
return -EINVAL;
}
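Note on tick-oneshot.c: a straight printk() to pr_info()/pr_cont() conversion. The first fragment opens the line, continuations append to it, and only the final fragment carries the newline. A userspace analogue, mapping both macros to printf() purely for illustration:

#include <stdio.h>

#define pr_info(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define pr_cont(fmt, ...) printf(fmt, ##__VA_ARGS__)

int main(void)
{
        const char *name = "dummy_timer";  /* hypothetical device name */

        pr_info("Clockevents: could not switch to one-shot mode:");
        pr_cont(" %s does not support one-shot mode.\n", name);
        return 0;
}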
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ca90219a1e73..dcf7f20fcd12 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -2139,13 +2139,6 @@ unsigned long get_seconds(void)
}
EXPORT_SYMBOL(get_seconds);
-struct timespec __current_kernel_time(void)
-{
- struct timekeeper *tk = &tk_core.timekeeper;
-
- return timespec64_to_timespec(tk_xtime(tk));
-}
-
struct timespec64 current_kernel_time64(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
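Note on timekeeping.c: this deletes a helper built on struct timespec, leaving the timespec64-based current_kernel_time64() in place. On 32-bit architectures the old type's seconds field overflows in January 2038. A sketch of the difference, using illustrative struct names rather than the kernel definitions:

#include <stdint.h>

struct timespec_ilp32 {   /* like struct timespec with 32-bit time_t */
        int32_t tv_sec;   /* overflows on 2038-01-19 */
        long    tv_nsec;
};

struct timespec64_like {  /* like struct timespec64 */
        int64_t tv_sec;   /* safe far beyond 2038 */
        long    tv_nsec;
};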
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d88e96d4e12c..56ba0f2a01db 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
struct perf_event_query_bpf __user *uquery = info;
struct perf_event_query_bpf query = {};
+ u32 *ids, prog_cnt, ids_len;
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
return -EINVAL;
if (copy_from_user(&query, uquery, sizeof(query)))
return -EFAULT;
- if (query.ids_len > BPF_TRACE_MAX_PROGS)
+
+ ids_len = query.ids_len;
+ if (ids_len > BPF_TRACE_MAX_PROGS)
return -E2BIG;
+ ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
+ if (!ids)
+ return -ENOMEM;
+ /*
+ * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
+ * is required when user only wants to check for uquery->prog_cnt.
+ * There is no need to check for it since the case is handled
+ * gracefully in bpf_prog_array_copy_info.
+ */
mutex_lock(&bpf_event_mutex);
ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
- uquery->ids,
- query.ids_len,
- &uquery->prog_cnt);
+ ids,
+ ids_len,
+ &prog_cnt);
mutex_unlock(&bpf_event_mutex);
+ if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
+ copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
+ ret = -EFAULT;
+
+ kfree(ids);
return ret;
}
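Note on bpf_trace.c, the heart of this set: perf_event_query_prog_array() used to pass user pointers into bpf_prog_array_copy_info() while holding bpf_event_mutex, so a faulting copy_to_user() could take mmap_sem under that mutex. The fix snapshots the IDs into a kcalloc'd kernel buffer under the lock and touches user memory only after unlocking. A userspace sketch of that bounce-buffer discipline, with memcpy() standing in for copy_to_user() and a pthread mutex for bpf_event_mutex (assumes ids_len > 0; the kernel handles 0 via ZERO_SIZE_PTR):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

int query_ids(unsigned int *user_buf, unsigned int ids_len)
{
        unsigned int *ids;
        unsigned int i;

        ids = calloc(ids_len, sizeof(*ids));
        if (!ids)
                return -ENOMEM;

        pthread_mutex_lock(&event_mutex);
        for (i = 0; i < ids_len; i++)
                ids[i] = i + 1;   /* gather from the protected structure */
        pthread_mutex_unlock(&event_mutex);

        /* the potentially faulting copy runs with the lock dropped */
        memcpy(user_buf, ids, ids_len * sizeof(*ids));
        free(ids);
        return 0;
}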
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1cd3fb4d70f8..02aed76e0978 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
if (ret == 0)
tk->tp.flags |= TP_FLAG_REGISTERED;
else {
- pr_warn("Could not insert probe at %s+%lu: %d\n",
- trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
ret = 0;