| author | Frederic Weisbecker <fweisbec@gmail.com> | 2017-01-31 04:09:23 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2017-02-01 09:13:49 +0100 |
| commit | 5613fda9a503cd6137b120298902a34a1386b2c1 (patch) | |
| tree | 5431dd50508a6cfa052a3b9966e58842b430804d /kernel/sys.c | |
| parent | a1cecf2ba78e0a6de00ff99df34b662728535aa5 (diff) | |
sched/cputime: Convert task/group cputime to nsecs
Now that most cputime readers use the transition API, which returns the
task cputime in old-style cputime_t, we can safely store the cputime in
nsecs. This will eventually make cputime statistics less opaque and more
granular. Back-and-forth conversions between cputime_t and nsecs, needed
only to cope with the random granularity of cputime_t, won't be necessary
anymore.
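The gist of the change is that cputime is now carried as a plain u64 nanosecond count and only scaled down to coarser units (clock_t ticks, timevals) at the syscall boundary. Below is a minimal, standalone userspace-style sketch of that idea; it is not kernel code, the USER_HZ value is an assumption, and nsec_to_clock_t_sketch() is an illustrative stand-in for the kernel's nsec_to_clock_t() helper used in the diff.

```c
/*
 * Standalone sketch (not kernel code): keep cputime as u64 nanoseconds
 * and convert to USER_HZ clock ticks only when reporting it, as
 * do_sys_times() does after this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define USER_HZ      100ULL	/* assumed tick rate exposed to userspace */

/* Convert a nanosecond count to USER_HZ clock ticks. */
static uint64_t nsec_to_clock_t_sketch(uint64_t nsec)
{
	return nsec / (NSEC_PER_SEC / USER_HZ);
}

int main(void)
{
	uint64_t utime_ns = 1500000000ULL;	/* 1.5 s of user time */

	/* 1.5 s at USER_HZ=100 -> 150 ticks, no cputime_t round trip. */
	printf("%llu ticks\n",
	       (unsigned long long)nsec_to_clock_t_sketch(utime_ns));
	return 0;
}
```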
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sys.c')
-rw-r--r-- | kernel/sys.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/kernel/sys.c b/kernel/sys.c
index 842914ef7de4..7d4a9a6df956 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -881,15 +881,15 @@ SYSCALL_DEFINE0(getegid)

 void do_sys_times(struct tms *tms)
 {
-	cputime_t tgutime, tgstime, cutime, cstime;
+	u64 tgutime, tgstime, cutime, cstime;

 	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
 	cutime = current->signal->cutime;
 	cstime = current->signal->cstime;
-	tms->tms_utime = cputime_to_clock_t(tgutime);
-	tms->tms_stime = cputime_to_clock_t(tgstime);
-	tms->tms_cutime = cputime_to_clock_t(cutime);
-	tms->tms_cstime = cputime_to_clock_t(cstime);
+	tms->tms_utime = nsec_to_clock_t(tgutime);
+	tms->tms_stime = nsec_to_clock_t(tgstime);
+	tms->tms_cutime = nsec_to_clock_t(cutime);
+	tms->tms_cstime = nsec_to_clock_t(cstime);
 }

 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
@@ -1544,7 +1544,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
 	struct task_struct *t;
 	unsigned long flags;
-	cputime_t tgutime, tgstime, utime, stime;
+	u64 tgutime, tgstime, utime, stime;
 	unsigned long maxrss = 0;

 	memset((char *)r, 0, sizeof (*r));
@@ -1600,8 +1600,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	unlock_task_sighand(p, &flags);

 out:
-	cputime_to_timeval(utime, &r->ru_utime);
-	cputime_to_timeval(stime, &r->ru_stime);
+	r->ru_utime = ns_to_timeval(utime);
+	r->ru_stime = ns_to_timeval(stime);

 	if (who != RUSAGE_CHILDREN) {
 		struct mm_struct *mm = get_task_mm(p);