author     Ingo Molnar <mingo@kernel.org>  2013-02-05 13:10:33 +0100
committer  Ingo Molnar <mingo@kernel.org>  2013-02-05 13:10:33 +0100
commit     b2c77a57e4a0a7877e357dead7ee8acc19944f3e (patch)
tree       fa192b5a058711299c2a8ce2621df6c9bd8f3a99 /include/linux/sched.h
parent     c3c186403c6abd32e719f005f0af950155a9e54d (diff)
parent     6a61671bb2f3a1bd12cd17b8fca811a624782632 (diff)
Merge tag 'full-dynticks-cputime-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core
Pull full-dynticks (user-space execution is undisturbed and receives no timer IRQs) preparation changes that convert the cputime accounting code to be full-dynticks ready, from Frederic Weisbecker:

"This implements the cputime accounting on full dynticks CPUs.

Typical cputime stats infrastructure relies on the timer tick and its periodic polling on the CPU to account the amount of time spent by the CPUs and the tasks per high level domains such as userspace, kernelspace, guest, ...

Now we are preparing to implement full dynticks capability on Linux for Real Time and HPC users who want full CPU isolation. This feature requires a cputime accounting that doesn't depend on the timer tick.

To implement it, this new cputime infrastructure plugs into kernel/user/guest boundaries to take snapshots of cputime and flush these to the stats when needed. This performs pretty much like CONFIG_VIRT_CPU_ACCOUNTING except that context location and cputime snapshots are synchronized between write and read side such that the latter can safely retrieve the pending tickless cputime of a task and add it to its latest cputime snapshot to return the correct result to the user."

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
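The read/write synchronization described above can be pictured as a standard seqlock retry loop: the reader samples the task's snapshot state and retries if the writer moved it concurrently. The sketch below is illustrative only and is not the merged code; it assumes vtime_snap holds a local_clock() timestamp, and sketch_pending_user_ns() is a hypothetical name. It computes only the pending (not yet flushed) user time, which a caller would add to the already accounted t->utime.

#include <linux/sched.h>
#include <linux/seqlock.h>

/*
 * Illustrative sketch, not the merged code: return the tickless user
 * time accumulated since the last snapshot, retrying if the writer
 * updates the snapshot underneath us.  Assumes vtime_snap holds a
 * local_clock() timestamp.
 */
static u64 sketch_pending_user_ns(struct task_struct *t)
{
	unsigned int seq;
	u64 pending;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		pending = 0;
		if (t->vtime_snap_whence == VTIME_USER)
			pending = local_clock() - t->vtime_snap;
	} while (read_seqretry(&t->vtime_seqlock, seq));

	return pending;
}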
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 924e42a8df58..719ee0815e3a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1369,6 +1369,15 @@ struct task_struct {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
struct cputime prev_cputime;
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ seqlock_t vtime_seqlock;
+ unsigned long long vtime_snap;
+ enum {
+ VTIME_SLEEPING = 0,
+ VTIME_USER,
+ VTIME_SYS,
+ } vtime_snap_whence;
+#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
struct timespec real_start_time; /* boot based time */
@@ -1793,6 +1802,37 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+ cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+ cputime_t *utime, cputime_t *stime)
+{
+ if (utime)
+ *utime = t->utime;
+ if (stime)
+ *stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled,
+ cputime_t *stimescaled)
+{
+ if (utimescaled)
+ *utimescaled = t->utimescaled;
+ if (stimescaled)
+ *stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+ return t->gtime;
+}
+#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
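As a usage note, the point of these accessors is that callers stop reading t->utime, t->stime and t->gtime directly and go through task_cputime()/task_gtime(), so that on CONFIG_VIRT_CPU_ACCOUNTING_GEN kernels the pending tickless delta is folded in. A minimal, hypothetical caller is sketched below; report_task_times() is an illustrative name and is not part of this patch.

#include <linux/sched.h>
#include <linux/printk.h>

/* Hypothetical example, not part of this patch: report a task's times
 * through the new accessors instead of dereferencing t->utime and
 * t->stime directly. */
static void report_task_times(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	pr_info("%s[%d]: utime=%llu stime=%llu gtime=%llu\n",
		t->comm, task_pid_nr(t),
		(unsigned long long)utime,
		(unsigned long long)stime,
		(unsigned long long)task_gtime(t));
}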