From b76834bc1b6db0a0923eed85c81b1113021b0612 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 6 Dec 2010 11:16:25 -0600
Subject: kprobes: Use this_cpu_ops

Use this_cpu ops in various places to optimize per cpu data access.

Cc: Jason Baron
Cc: Namhyung Kim
Acked-by: H. Peter Anvin
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 include/linux/kprobes.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e7d1b2e0070d..0c251e9f0507 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -303,12 +303,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-	return (__get_cpu_var(current_kprobe));
+	return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-	__get_cpu_var(current_kprobe) = NULL;
+	__this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
--
cgit v1.2.3
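The conversion pattern this series applies can be summarized with a small sketch; demo_ptr and the helpers below are made-up illustrations (assuming <linux/percpu.h> and <linux/kprobes.h>), not code from the patch above:

#include <linux/kprobes.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(struct kprobe *, demo_ptr);	/* hypothetical per-cpu variable */

static struct kprobe *demo_get_old_style(void)
{
	/* old style: __get_cpu_var() forms a per-cpu lvalue, then reads it */
	return __get_cpu_var(demo_ptr);
}

static struct kprobe *demo_get_new_style(void)
{
	/* new style: a single per-cpu read, no separate address calculation */
	return __this_cpu_read(demo_ptr);
}

static void demo_reset(void)
{
	/* new style: a per-cpu write replaces assignment through __get_cpu_var() */
	__this_cpu_write(demo_ptr, NULL);
}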
From 909ea96468096b07fbb41aaf69be060d92bd9271 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 8 Dec 2010 16:22:55 +0100
Subject: core: Replace __get_cpu_var with __this_cpu_read if not used for an address.

__get_cpu_var() can be replaced with this_cpu_read and will then use a
single read instruction with implied address calculation to access the
correct per cpu instance.

However, the address of a per cpu variable passed to __this_cpu_read()
cannot be determined (since it's an implied address conversion through
segment prefixes).  Therefore apply this only to uses of __get_cpu_var
where the address of the variable is not used.

Cc: Pekka Enberg
Cc: Hugh Dickins
Cc: Thomas Gleixner
Acked-by: H. Peter Anvin
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 include/asm-generic/irq_regs.h |  8 ++++----
 include/linux/elevator.h       | 12 +++---------
 include/linux/kernel_stat.h    |  2 +-
 3 files changed, 8 insertions(+), 14 deletions(-)

(limited to 'include')

diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07d4a12..6bf9355fa7eb 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return __get_cpu_var(__irq_regs);
+	return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-	struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+	struct pt_regs *old_regs;
 
-	old_regs = *pp_regs;
-	*pp_regs = new_regs;
+	old_regs = __this_cpu_read(__irq_regs);
+	__this_cpu_write(__irq_regs, new_regs);
 
 	return old_regs;
 }
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e7eb83..4d857973d2c9 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)	\
-	do {	\
-		preempt_disable();	\
-		__get_cpu_var(name) += (__val);	\
-		preempt_enable();	\
-	} while (0)
-
-#define elv_ioc_count_inc(name)	elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)	elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val)	this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)	this_cpu_inc(name)
+#define elv_ioc_count_dec(name)	this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)	\
 ({	\
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ad54c846911b..44e83ba12b5b 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
+	(this_cpu_read(kstat.irqs[irq])
 
 struct irq_desc;
--
cgit v1.2.3
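The rule stated in the changelog (value-only uses of __get_cpu_var() may become __this_cpu_read(); uses that take the address may not) can be illustrated with a short sketch; demo_count and both helpers are hypothetical, not part of the patch:

#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_count);	/* hypothetical */

static unsigned long demo_value(void)
{
	/* only the value is needed: eligible for __this_cpu_read() */
	return __this_cpu_read(demo_count);
}

static unsigned long *demo_address(void)
{
	/*
	 * The address escapes here, so a segment-prefixed read cannot be
	 * substituted; the address-based form has to stay.
	 */
	return &__get_cpu_var(demo_count);
}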
From a663ffff1d2e94a7c549a37d08ed9169ce83bdd6 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 6 Dec 2010 11:39:59 -0600
Subject: percpu: Generic support for this_cpu_add, sub, dec, inc_return

Introduce generic support for this_cpu_add_return etc.

The fallback is to realize these operations with simpler __this_cpu_ops.

tj: - Reformatted __pcpu_size_call_return2() to make it more consistent
      with its neighbors.
    - Dropped unnecessary temp variable ret__ from
      __this_cpu_generic_add_return().

Reviewed-by: Tejun Heo
Reviewed-by: Mathieu Desnoyers
Acked-by: H. Peter Anvin
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 include/linux/percpu.h | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)

(limited to 'include')

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b834a6fb..4d593defc47d 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
 	pscr_ret__;	\
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)	\
+({	\
+	typeof(variable) pscr2_ret__;	\
+	__verify_pcpu_ptr(&(variable));	\
+	switch(sizeof(variable)) {	\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:	\
+		__bad_size_call_parameter(); break;	\
+	}	\
+	pscr2_ret__;	\
+})
+
 #define __pcpu_size_call(stem, variable, ...)	\
 do {	\
 	__verify_pcpu_ptr(&(variable));	\
@@ -529,6 +544,62 @@ do {	\
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)	\
+({	\
+	typeof(pcp) ret__;	\
+	preempt_disable();	\
+	__this_cpu_add(pcp, val);	\
+	ret__ = __this_cpu_read(pcp);	\
+	preempt_enable();	\
+	ret__;	\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_add_return(pcp, val)	\
+({	\
+	__this_cpu_add(pcp, val);	\
+	__this_cpu_read(pcp);	\
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+# define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+# define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+# define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+# define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
--
cgit v1.2.3
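A minimal usage sketch of the new *_return operations, built on a hypothetical per-cpu counter demo_events; on architectures without an arch-specific implementation, the generic fallback above expands these calls into preempt_disable()/__this_cpu_add()/__this_cpu_read()/preempt_enable():

#include <linux/percpu.h>

DEFINE_PER_CPU(int, demo_events);	/* hypothetical */

static int demo_record_event(void)
{
	/* bump this CPU's counter and return the new value in one operation */
	return this_cpu_inc_return(demo_events);
}

static int demo_record_batch(int n)
{
	return this_cpu_add_return(demo_events, n);
}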
From cfb824349556904b319464139be5c75fce983b0d Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 6 Dec 2010 11:40:03 -0600
Subject: highmem: Use this_cpu_xx_return() operations

Use this_cpu operations to optimize access primitives for highmem.

The main effect is the avoidance of address calculations through the
use of a segment prefix.

V3->V4:
- kmap_atomic_idx: Do not return a value.
- Use __this_cpu_dec without HIGHMEM_DEBUG

Cc: Peter Zijlstra
Cc: Catalin Marinas
Acked-by: H. Peter Anvin
Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 include/linux/highmem.h | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b676c585574e..3a93f73a8acc 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx);
 
 static inline int kmap_atomic_idx_push(void)
 {
-	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
 #ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
 	BUG_ON(idx > KM_TYPE_NR);
@@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void)
 
 static inline int kmap_atomic_idx(void)
 {
-	return __get_cpu_var(__kmap_atomic_idx) - 1;
+	return __this_cpu_read(__kmap_atomic_idx) - 1;
 }
 
-static inline int kmap_atomic_idx_pop(void)
+static inline void kmap_atomic_idx_pop(void)
 {
-	int idx = --__get_cpu_var(__kmap_atomic_idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
+	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
 	BUG_ON(idx < 0);
+#else
+	__this_cpu_dec(__kmap_atomic_idx);
 #endif
-	return idx;
 }
 
 #endif
--
cgit v1.2.3
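The "- 1" in kmap_atomic_idx_push() compensates for __this_cpu_inc_return() reporting the post-increment value. A rough sketch of the same idiom with a made-up per-cpu index (not the real highmem code):

#include <linux/percpu.h>

DEFINE_PER_CPU(int, demo_idx);	/* hypothetical per-cpu stack index */

static int demo_idx_push(void)
{
	/*
	 * __this_cpu_inc_return() yields the value *after* the increment,
	 * so subtracting 1 recovers what __get_cpu_var(x)++ used to return.
	 */
	return __this_cpu_inc_return(demo_idx) - 1;
}

static void demo_idx_pop(void)
{
	__this_cpu_dec(demo_idx);
}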
From 403047754cf690b012369b8fb563b738b88086e6 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 17 Dec 2010 15:47:04 +0100
Subject: percpu,x86: relocate this_cpu_add_return() and friends

- include/linux/percpu.h: this_cpu_add_return() and friends were
  located next to __this_cpu_add_return().  However, the overall
  organization is to first group by preemption safeness.  Relocate
  this_cpu_add_return() and friends to preemption-safe area.

- arch/x86/include/asm/percpu.h: Relocate percpu_add_return_op() after
  other more basic operations.  Relocate [__]this_cpu_add_return_8()
  so that they're first grouped by preemption safeness.

Signed-off-by: Tejun Heo
Cc: Christoph Lameter
---
 include/linux/percpu.h | 60 +++++++++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

(limited to 'include')

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 4d593defc47d..3484e88d93f8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -417,6 +417,36 @@ do {	\
 # define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)	\
+({	\
+	typeof(pcp) ret__;	\
+	preempt_disable();	\
+	__this_cpu_add(pcp, val);	\
+	ret__ = __this_cpu_read(pcp);	\
+	preempt_enable();	\
+	ret__;	\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -544,36 +574,6 @@ do {	\
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
-#define _this_cpu_generic_add_return(pcp, val)	\
-({	\
-	typeof(pcp) ret__;	\
-	preempt_disable();	\
-	__this_cpu_add(pcp, val);	\
-	ret__ = __this_cpu_read(pcp);	\
-	preempt_enable();	\
-	ret__;	\
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-# define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-# define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-# define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-# define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
-#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
-
 #define __this_cpu_generic_add_return(pcp, val)	\
 ({	\
 	__this_cpu_add(pcp, val);	\
 	__this_cpu_read(pcp);	\
--
cgit v1.2.3
From 2b7124428561c7c3cfa4a58cc4c6feea53f3148e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sat, 18 Dec 2010 15:54:04 +0100
Subject: percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support

Generic code to provide new per cpu atomic features

	this_cpu_cmpxchg
	this_cpu_xchg

Fallback occurs to functions using interrupts disable/enable to ensure
correct per cpu atomicity.

Fallback to regular cmpxchg and xchg is not possible since per cpu
atomic semantics include the guarantee that the current cpus per cpu
data is accessed atomically.  Use of regular cmpxchg and xchg requires
the determination of the address of the per cpu data before regular
cmpxchg or xchg which therefore cannot be atomically included in an
xchg or cmpxchg without segment override.

tj: - Relocated new ops to conform better to the general organization.
    - This patch contains a trivial comment fix.

Signed-off-by: Christoph Lameter
Signed-off-by: Tejun Heo
---
 include/linux/percpu.h | 134 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 133 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3484e88d93f8..27c3c6fcfad3 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -447,6 +447,59 @@ do {	\
 #define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
+#define _this_cpu_generic_xchg(pcp, nval)	\
+({	typeof(pcp) ret__;	\
+	preempt_disable();	\
+	ret__ = __this_cpu_read(pcp);	\
+	__this_cpu_write(pcp, nval);	\
+	preempt_enable();	\
+	ret__;	\
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+# define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+# define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+# define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+# define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)	\
+({	typeof(pcp) ret__;	\
+	preempt_disable();	\
+	ret__ = __this_cpu_read(pcp);	\
+	if (ret__ == (oval))	\
+		__this_cpu_write(pcp, nval);	\
+	preempt_enable();	\
+	ret__;	\
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+# define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+# define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+# define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+# define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -600,11 +653,61 @@ do {	\
 #define __this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
 #define __this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
 
+#define __this_cpu_generic_xchg(pcp, nval)	\
+({	typeof(pcp) ret__;	\
+	ret__ = __this_cpu_read(pcp);	\
+	__this_cpu_write(pcp, nval);	\
+	ret__;	\
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)	\
+({	\
+	typeof(pcp) ret__;	\
+	ret__ = __this_cpu_read(pcp);	\
+	if (ret__ == (oval))	\
+		__this_cpu_write(pcp, nval);	\
+	ret__;	\
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)	\
@@ -691,4 +794,33 @@ do {	\
 # define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)	\
+({	\
+	typeof(pcp) ret__;	\
+	unsigned long flags;	\
+	local_irq_save(flags);	\
+	ret__ = __this_cpu_read(pcp);	\
+	if (ret__ == (oval))	\
+		__this_cpu_write(pcp, nval);	\
+	local_irq_restore(flags);	\
+	ret__;	\
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
--
cgit v1.2.3
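To show how the new per cpu atomics are typically used, here is a sketch of a lock-free, bounded per-CPU counter built on this_cpu_cmpxchg(); the variable demo_budget and the demo_charge() helper are invented for illustration and are not part of the patch:

#include <linux/percpu.h>
#include <linux/types.h>

DEFINE_PER_CPU(int, demo_budget);	/* hypothetical */

static bool demo_charge(int limit)
{
	int old, new;

	do {
		old = this_cpu_read(demo_budget);
		if (old >= limit)
			return false;
		new = old + 1;
		/*
		 * Retry if this CPU's slot changed between the read and the
		 * cmpxchg (e.g. an interrupt on this CPU or a migration to
		 * another CPU).
		 */
	} while (this_cpu_cmpxchg(demo_budget, old, new) != old);

	return true;
}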