/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Tracing hooks
*
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This file defines hook entry points called by core code where
* user tracing/debugging support might need to do something. These
* entry points are called tracehook_*(). Each hook declared below
* has a detailed kerneldoc comment giving the context (locking et
* al) from which it is called, and the meaning of its return value.
*
* Each function here typically has only one call site, so it is ok
* to have some nontrivial tracehook_*() inlines. In all cases, the
* fast path when no tracing is enabled should be very short.
*
* The purpose of this file and the tracehook_* layer is to consolidate
* the interface that the kernel core and arch code uses to enable any
* user debugging or tracing facility (such as ptrace). The interfaces
* here are carefully documented so that maintainers of core and arch
* code do not need to think about the implementation details of the
* tracing facilities. Likewise, maintainers of the tracing code do not
* need to understand all the calling core or arch code in detail, just
* documented circumstances of each call, such as locking conditions.
*
 * If the calling core code changes so that locking is different, then
 * it is ok to change the interface documented here. The maintainer
 * making that change to the core code should notify the maintainers of
 * the tracing code so that they can work out the change together.
*
* Some tracehook_*() inlines take arguments that the current tracing
* implementations might not necessarily use. These function signatures
* are chosen to pass in all the information that is on hand in the
* caller and might conceivably be relevant to a tracer, so that the
* core code won't have to be updated when tracing adds more features.
* If a call site changes so that some of those parameters are no longer
* already on hand without extra work, then the tracehook_* interface
 * can change so there is no make-work burden on the core code. Again,
 * the maintainer making that change to the core code should notify the
 * maintainers of the tracing code so that they can work out the change
 * together.
*/

#ifndef _LINUX_TRACEHOOK_H
#define _LINUX_TRACEHOOK_H 1

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
#include <linux/blk-cgroup.h>

struct linux_binprm;

/**
* set_notify_resume - cause tracehook_notify_resume() to be called
* @task: task that will call tracehook_notify_resume()
*
* Calling this arranges that @task will call tracehook_notify_resume()
* before returning to user mode. If it's already running in user mode,
* it will enter the kernel and call tracehook_notify_resume() soon.
* If it's blocked, it will not be woken.
*/
static inline void set_notify_resume(struct task_struct *task)
{
#ifdef TIF_NOTIFY_RESUME
if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
kick_process(task);
#endif
}
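
/*
 * Illustrative sketch only, not part of this header: the usual producer of
 * this notification is task_work_add(..., TWA_RESUME), which queues a
 * callback and then calls set_notify_resume() itself; the callback runs
 * from tracehook_notify_resume() via task_work_run() the next time the
 * target task heads back to user mode. my_callback/queue_my_callback
 * below are hypothetical names.
 *
 *	static void my_callback(struct callback_head *head)
 *	{
 *		// Runs in the context of the target task, without locks.
 *	}
 *
 *	// @work must stay allocated until my_callback() has run.
 *	static int queue_my_callback(struct task_struct *task,
 *				     struct callback_head *work)
 *	{
 *		init_task_work(work, my_callback);
 *		return task_work_add(task, work, TWA_RESUME); // -ESRCH if exiting
 *	}
 */
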
/**
* tracehook_notify_resume - report when about to return to user mode
* @regs: user-mode registers of @current task
*
* This is called when %TIF_NOTIFY_RESUME has been set. Now we are
* about to return to user mode, and the user state in @regs can be
 * inspected or adjusted. %TIF_NOTIFY_RESUME is cleared here, before any
 * pending work is run, so if the flag gets set again asynchronously
 * this will be called again before we return to user mode.
*
* Called without locks.
*/
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
/*
* This barrier pairs with task_work_add()->set_notify_resume() after
* hlist_add_head(task->task_works);
*/
smp_mb__after_atomic();
if (unlikely(current->task_works))
task_work_run();
#ifdef CONFIG_KEYS_REQUEST_CACHE
if (unlikely(current->cached_requested_key)) {
key_put(current->cached_requested_key);
current->cached_requested_key = NULL;
}
#endif
mem_cgroup_handle_over_high();
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
}
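
/*
 * Illustrative sketch only, not part of this header: the generic entry
 * code (and, in spirit, each arch's return-to-user work loop) invokes
 * this hook roughly as follows; the surrounding loop and the rest of the
 * ti_work handling are omitted.
 *
 *	if (ti_work & _TIF_NOTIFY_RESUME)
 *		tracehook_notify_resume(regs);
 */
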
/*
 * Called by exit_to_user_mode_loop() if ti_work & _TIF_NOTIFY_SIGNAL. This
 * is currently used by TWA_SIGNAL-based task_work, which requires breaking
* wait loops to ensure that task_work is noticed and run.
*/
static inline void tracehook_notify_signal(void)
{
clear_thread_flag(TIF_NOTIFY_SIGNAL);
smp_mb__after_atomic();
if (current->task_works)
task_work_run();
}
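
/*
 * Illustrative sketch only, not part of this header: as with
 * tracehook_notify_resume(), the return-to-user work loop calls this hook
 * when it sees the corresponding flag, roughly:
 *
 *	if (ti_work & _TIF_NOTIFY_SIGNAL)
 *		tracehook_notify_signal();
 */
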
/*
 * Cause @task to enter exit_to_user_mode_loop() and process its pending
 * task_work: set TIF_NOTIFY_SIGNAL and wake @task if it is sleeping
 * interruptibly, or kick it if it is currently running on another CPU.
 */
static inline void set_notify_signal(struct task_struct *task)
{
if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
!wake_up_state(task, TASK_INTERRUPTIBLE))
kick_process(task);
}
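
/*
 * Illustrative sketch only, not part of this header: TWA_SIGNAL task_work
 * is the expected user of this helper. task_work_add(task, &work, TWA_SIGNAL)
 * queues the work and then calls set_notify_signal(), so a task sleeping in
 * an interruptible wait is woken (or a running task is kicked) and runs the
 * work via tracehook_notify_signal() on its way back to user mode, e.g.
 * (reusing the hypothetical my_callback from the sketch above):
 *
 *	init_task_work(&work, my_callback);
 *	task_work_add(task, &work, TWA_SIGNAL);	// also breaks interruptible waits
 */
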
#endif /* <linux/tracehook.h> */