Diffstat (limited to 'include')
-rw-r--r--  include/acpi/processor.h                    1
-rw-r--r--  include/asm-i386/apic.h                     9
-rw-r--r--  include/asm-i386/hpet.h                    16
-rw-r--r--  include/asm-i386/i8253.h                   15
-rw-r--r--  include/asm-i386/mach-default/do_timer.h   78
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h   27
-rw-r--r--  include/asm-i386/mpspec.h                   1
-rw-r--r--  include/asm-i386/msr.h                      3
-rw-r--r--  include/asm-i386/tsc.h                     49
-rw-r--r--  include/asm-x86_64/hpet.h                   7
-rw-r--r--  include/asm-x86_64/proto.h                  6
-rw-r--r--  include/asm-x86_64/timex.h                 35
-rw-r--r--  include/asm-x86_64/tsc.h                   66
-rw-r--r--  include/asm-x86_64/vsyscall.h              29
-rw-r--r--  include/linux/acpi_pmtmr.h                 38
-rw-r--r--  include/linux/agp_backend.h                 5
-rw-r--r--  include/linux/clockchips.h                142
-rw-r--r--  include/linux/clocksource.h                39
-rw-r--r--  include/linux/cpufreq.h                    10
-rw-r--r--  include/linux/hardirq.h                     9
-rw-r--r--  include/linux/hrtimer.h                   260
-rw-r--r--  include/linux/interrupt.h                   6
-rw-r--r--  include/linux/irq.h                        52
-rw-r--r--  include/linux/jiffies.h                   222
-rw-r--r--  include/linux/ktime.h                       3
-rw-r--r--  include/linux/nfs4.h                        3
-rw-r--r--  include/linux/nfs4_acl.h                    9
-rw-r--r--  include/linux/tick.h                      109
-rw-r--r--  include/linux/time.h                        1
-rw-r--r--  include/linux/timer.h                      66
-rw-r--r--  include/linux/timex.h                       7
31 files changed, 849 insertions, 474 deletions
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7798d2a9f793..916c0102db5b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -79,6 +79,7 @@ struct acpi_processor_power {
u32 bm_activity;
int count;
struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
+ int timer_broadcast_on_state;
};
/* Performance Management */
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 3a61206fd108..cc6b1652249a 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -95,9 +95,7 @@ static inline void ack_APIC_irq(void)
apic_write_around(APIC_EOI, 0);
}
-extern void (*wait_timer_tick)(void);
-
-extern int get_maxlvt(void);
+extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (int virt_wire_setup);
@@ -113,14 +111,9 @@ extern void smp_local_timer_interrupt (void);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern int APIC_init_uniprocessor (void);
-extern void disable_APIC_timer(void);
-extern void enable_APIC_timer(void);
extern void enable_NMI_through_LVT0 (void * dummy);
-void smp_send_timer_broadcast_ipi(void);
-void switch_APIC_timer_to_ipi(void *cpumask);
-void switch_ipi_to_APIC_timer(void *cpumask);
#define ARCH_APICTIMER_STOPS_ON_C3 1
extern int timer_over_8254;
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index e47be9a56cc2..fc03cf9de5c4 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -90,16 +90,19 @@
#define HPET_MIN_PERIOD (100000UL)
#define HPET_TICK_RATE (HZ * 100000UL)
-extern unsigned long hpet_tick; /* hpet clks count per tick */
extern unsigned long hpet_address; /* hpet memory map physical address */
-extern int hpet_use_timer;
+extern int is_hpet_enabled(void);
+#ifdef CONFIG_X86_64
+extern unsigned long hpet_tick; /* hpet clks count per tick */
+extern int hpet_use_timer;
extern int hpet_rtc_timer_init(void);
extern int hpet_enable(void);
-extern int hpet_reenable(void);
-extern int is_hpet_enabled(void);
extern int is_hpet_capable(void);
extern int hpet_readl(unsigned long a);
+#else
+extern int hpet_enable(void);
+#endif
#ifdef CONFIG_HPET_EMULATE_RTC
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
@@ -110,5 +113,10 @@ extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
#endif /* CONFIG_HPET_EMULATE_RTC */
+
+#else
+
+static inline int hpet_enable(void) { return 0; }
+
#endif /* CONFIG_HPET_TIMER */
#endif /* _I386_HPET_H */
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 015d8df07690..6cb0dd4dcdde 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -1,6 +1,21 @@
#ifndef __ASM_I8253_H__
#define __ASM_I8253_H__
+#include <linux/clockchips.h>
+
extern spinlock_t i8253_lock;
+extern struct clock_event_device *global_clock_event;
+
+/**
+ * pit_interrupt_hook - hook into timer tick
+ *
+ * Call the global clock event handler.
+ **/
+static inline void pit_interrupt_hook(void)
+{
+ global_clock_event->event_handler(global_clock_event);
+}
+
#endif /* __ASM_I8253_H__ */
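
For orientation, a minimal sketch of the interrupt side this hook is designed for; the handler and irqaction names here are illustrative, not part of this patch:

/* Sketch only: an IRQ0 handler forwarding the tick to the clockevents layer. */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	pit_interrupt_hook();	/* calls global_clock_event->event_handler() */
	return IRQ_HANDLED;
}

static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags	 = IRQF_DISABLED | IRQF_TIMER,
	.name	 = "timer"
};
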
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 7d606e3364ae..56e5689863ae 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -1,86 +1,16 @@
/* defines for inline arch setup functions */
+#include <linux/clockchips.h>
-#include <asm/apic.h>
#include <asm/i8259.h>
+#include <asm/i8253.h>
/**
* do_timer_interrupt_hook - hook into timer tick
- * @regs: standard registers from interrupt
*
- * Description:
- * This hook is called immediately after the timer interrupt is ack'd.
- * It's primary purpose is to allow architectures that don't possess
- * individual per CPU clocks (like the CPU APICs supply) to broadcast the
- * timer interrupt as a means of triggering reschedules etc.
+ * Call the PIT clock event handler; see asm/i8253.h.
**/
static inline void do_timer_interrupt_hook(void)
{
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode_vm(get_irq_regs()));
-#endif
-/*
- * In the SMP case we use the local APIC timer interrupt to do the
- * profiling, except when we simulate SMP mode on a uniprocessor
- * system, in that case we have to call the local interrupt handler.
- */
-#ifndef CONFIG_X86_LOCAL_APIC
- profile_tick(CPU_PROFILING);
-#else
- if (!using_apic_timer)
- smp_local_timer_interrupt();
-#endif
-}
-
-
-/* you can safely undefine this if you don't have the Neptune chipset */
-
-#define BUGGY_NEPTUN_TIMER
-
-/**
- * do_timer_overflow - process a detected timer overflow condition
- * @count: hardware timer interrupt count on overflow
- *
- * Description:
- * This call is invoked when the jiffies count has not incremented but
- * the hardware timer interrupt has. It means that a timer tick interrupt
- * came along while the previous one was pending, thus a tick was missed
- **/
-static inline int do_timer_overflow(int count)
-{
- int i;
-
- spin_lock(&i8259A_lock);
- /*
- * This is tricky when I/O APICs are used;
- * see do_timer_interrupt().
- */
- i = inb(0x20);
- spin_unlock(&i8259A_lock);
-
- /* assumption about timer being IRQ0 */
- if (i & 0x01) {
- /*
- * We cannot detect lost timer interrupts ...
- * well, that's why we call them lost, don't we? :)
- * [hmm, on the Pentium and Alpha we can ... sort of]
- */
- count -= LATCH;
- } else {
-#ifdef BUGGY_NEPTUN_TIMER
- /*
- * for the Neptun bug we know that the 'latch'
- * command doesn't latch the high and low value
- * of the counter atomically. Thus we have to
- * substract 256 from the counter
- * ... funny, isnt it? :)
- */
-
- count -= 256;
-#else
- printk("do_slow_gettimeoffset(): hardware timer problem?\n");
-#endif
- }
- return count;
+ pit_interrupt_hook();
}
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 04e69c104a74..60f9dcc15d54 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -1,25 +1,18 @@
/* defines for inline arch setup functions */
+#include <linux/clockchips.h>
+
#include <asm/voyager.h>
+#include <asm/i8253.h>
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ *
+ * Call the PIT clock event handler; see asm/i8253.h.
+ **/
static inline void do_timer_interrupt_hook(void)
{
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode_vm(irq_regs));
-#endif
-
+ pit_interrupt_hook();
voyager_timer_interrupt();
}
-static inline int do_timer_overflow(int count)
-{
- /* can't read the ISR, just assume 1 tick
- overflow */
- if(count > LATCH || count < 0) {
- printk(KERN_ERR "VOYAGER PROBLEM: count is %d, latch is %d\n", count, LATCH);
- count = LATCH;
- }
- count -= LATCH;
-
- return count;
-}
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 770bf6da8c3d..f21349399d14 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -23,7 +23,6 @@ extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern int pic_mode;
-extern int using_apic_timer;
#ifdef CONFIG_ACPI
extern void mp_register_lapic (u8 id, u8 enabled);
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 609a3899475c..6db40d0583f1 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -307,4 +307,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0 0x1900
+
#endif /* __ASM_MSR_H */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index c13933185c1c..e997891cc7cc 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -1,48 +1 @@
-/*
- * linux/include/asm-i386/tsc.h
- *
- * i386 TSC related functions
- */
-#ifndef _ASM_i386_TSC_H
-#define _ASM_i386_TSC_H
-
-#include <asm/processor.h>
-
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is.
- */
-typedef unsigned long long cycles_t;
-
-extern unsigned int cpu_khz;
-extern unsigned int tsc_khz;
-
-static inline cycles_t get_cycles(void)
-{
- unsigned long long ret = 0;
-
-#ifndef CONFIG_X86_TSC
- if (!cpu_has_tsc)
- return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
- rdtscll(ret);
-#endif
- return ret;
-}
-
-extern void tsc_init(void);
-extern void mark_tsc_unstable(void);
-
-#endif
+#include <asm-x86_64/tsc.h>
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index b39098408b69..59a66f084611 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -56,8 +56,15 @@
extern int is_hpet_enabled(void);
extern int hpet_rtc_timer_init(void);
extern int apic_is_clustered_box(void);
+extern int hpet_arch_init(void);
+extern int hpet_timer_stop_set_go(unsigned long tick);
+extern int hpet_reenable(void);
+extern unsigned int hpet_calibrate_tsc(void);
extern int hpet_use_timer;
+extern unsigned long hpet_address;
+extern unsigned long hpet_period;
+extern unsigned long hpet_tick;
#ifdef CONFIG_HPET_EMULATE_RTC
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index a6d2ff5c69b7..f54f3abf93ce 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -45,11 +45,7 @@ extern u32 pmtmr_ioport;
#else
#define pmtmr_ioport 0
#endif
-extern unsigned long long monotonic_base;
-extern int sysctl_vsyscall;
extern int nohpet;
-extern unsigned long vxtime_hz;
-extern void time_init_gtod(void);
extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
@@ -91,8 +87,6 @@ extern void check_efer(void);
extern int unhandled_signal(struct task_struct *tsk, int sig);
-extern int unsynchronized_tsc(void);
-
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern unsigned long table_start, table_end;
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index b9e5320b7625..8c6808a3fba4 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -12,38 +12,21 @@
#include <asm/hpet.h>
#include <asm/system.h>
#include <asm/processor.h>
+#include <asm/tsc.h>
#include <linux/compiler.h>
#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
- unsigned long long ret;
-
- rdtscll(ret);
- return ret;
-}
-
-/* Like get_cycles, but make sure the CPU is synchronized. */
-static __always_inline cycles_t get_cycles_sync(void)
-{
- unsigned long long ret;
- unsigned eax;
- /* Don't do an additional sync on CPUs where we know
- RDTSC is already synchronous. */
- alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
- "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
- rdtscll(ret);
- return ret;
-}
-
-extern unsigned int cpu_khz;
-
extern int read_current_timer(unsigned long *timer_value);
#define ARCH_HAS_READ_CURRENT_TIMER 1
-extern struct vxtime_data vxtime;
+#define USEC_PER_TICK (USEC_PER_SEC / HZ)
+#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
+#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
+
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitrarily chosen */
+extern void mark_tsc_unstable(void);
+extern void set_cyc2ns_scale(unsigned long khz);
#endif
diff --git a/include/asm-x86_64/tsc.h b/include/asm-x86_64/tsc.h
new file mode 100644
index 000000000000..9a0a368852c7
--- /dev/null
+++ b/include/asm-x86_64/tsc.h
@@ -0,0 +1,66 @@
+/*
+ * linux/include/asm-x86_64/tsc.h
+ *
+ * x86_64 TSC related functions
+ */
+#ifndef _ASM_x86_64_TSC_H
+#define _ASM_x86_64_TSC_H
+
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+ rdtscll(ret);
+#endif
+ return ret;
+}
+
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+ unsigned long long ret;
+#ifdef X86_FEATURE_SYNC_RDTSC
+ unsigned eax;
+
+ /*
+ * Don't do an additional sync on CPUs where we know
+ * RDTSC is already synchronous:
+ */
+ alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
+ "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+#else
+ sync_core();
+#endif
+ rdtscll(ret);
+
+ return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+extern int unsynchronized_tsc(void);
+
+/*
+ * Boot-time check whether the TSCs are synchronized across
+ * all CPUs/cores:
+ */
+extern void check_tsc_sync_source(int cpu);
+extern void check_tsc_sync_target(void);
+
+#endif
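
A hedged usage sketch of the serialized read: timing a code region and converting cycles to microseconds via cpu_khz (assumes a constant-rate TSC; the helper name is illustrative):

/* Sketch: cpu_khz is cycles per millisecond, so cycles * 1000 / cpu_khz = us. */
static unsigned long long time_region_us(void (*fn)(void))
{
	cycles_t t1, t2;

	t1 = get_cycles_sync();	/* serialized: earlier instructions retire first */
	fn();
	t2 = get_cycles_sync();
	return (t2 - t1) * 1000ULL / cpu_khz;
}
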
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 0c7847165eae..82b4afe65c91 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -16,46 +16,27 @@ enum vsyscall_num {
#ifdef __KERNEL__
#include <linux/seqlock.h>
-#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
-#define __section_sys_tz __attribute__ ((unused, __section__ (".sys_tz"), aligned(16)))
-#define __section_sysctl_vsyscall __attribute__ ((unused, __section__ (".sysctl_vsyscall"), aligned(16)))
-#define __section_xtime __attribute__ ((unused, __section__ (".xtime"), aligned(16)))
-#define __section_xtime_lock __attribute__ ((unused, __section__ (".xtime_lock"), aligned(16)))
-#define VXTIME_TSC 1
-#define VXTIME_HPET 2
-#define VXTIME_PMTMR 3
+/* Definitions for CONFIG_GENERIC_TIME */
+#define __section_vsyscall_gtod_data __attribute__ \
+ ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
+#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
-struct vxtime_data {
- long hpet_address; /* HPET base address */
- int last;
- unsigned long last_tsc;
- long quot;
- long tsc_quot;
- int mode;
-};
-
#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
-/* vsyscall space (readonly) */
-extern struct vxtime_data __vxtime;
extern int __vgetcpu_mode;
-extern struct timespec __xtime;
extern volatile unsigned long __jiffies;
-extern struct timezone __sys_tz;
-extern seqlock_t __xtime_lock;
/* kernel space (writeable) */
-extern struct vxtime_data vxtime;
extern int vgetcpu_mode;
extern struct timezone sys_tz;
-extern int sysctl_vsyscall;
+extern struct vsyscall_gtod_data_t vsyscall_gtod_data;
#endif /* __KERNEL__ */
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
new file mode 100644
index 000000000000..1d0ef1ae8036
--- /dev/null
+++ b/include/linux/acpi_pmtmr.h
@@ -0,0 +1,38 @@
+#ifndef _ACPI_PMTMR_H_
+#define _ACPI_PMTMR_H_
+
+#include <linux/clocksource.h>
+
+/* Number of PMTMR ticks expected during calibration run */
+#define PMTMR_TICKS_PER_SEC 3579545
+
+/* limit it to 24 bits */
+#define ACPI_PM_MASK CLOCKSOURCE_MASK(24)
+
+/* Overrun value */
+#define ACPI_PM_OVRRUN (1<<24)
+
+#ifdef CONFIG_X86_PM_TIMER
+
+extern u32 acpi_pm_read_verified(void);
+extern u32 pmtmr_ioport;
+
+static inline u32 acpi_pm_read_early(void)
+{
+ if (!pmtmr_ioport)
+ return 0;
+ /* mask the output to 24 bits */
+ return acpi_pm_read_verified() & ACPI_PM_MASK;
+}
+
+#else
+
+static inline u32 acpi_pm_read_early(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif
+
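Because the PM timer is only 24 bits wide, deltas must be masked after the subtraction so that counter wraparound is handled; a small sketch of the pattern these constants support:

/* Sketch: wraparound-safe delta between two 24-bit PM timer reads. */
static inline u32 acpi_pm_delta(u32 start, u32 end)
{
	return (end - start) & ACPI_PM_MASK;	/* arithmetic modulo 2^24 */
}
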
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index a5c8bb5d80ba..abc521cfb084 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -87,10 +87,15 @@ struct agp_memory {
u32 physical;
u8 is_bound;
u8 is_flushed;
+ u8 vmalloc_flag;
};
#define AGP_NORMAL_MEMORY 0
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+
extern struct agp_bridge_data *agp_bridge;
extern struct list_head agp_bridges;
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
new file mode 100644
index 000000000000..4ea7e7bcfafe
--- /dev/null
+++ b/include/linux/clockchips.h
@@ -0,0 +1,142 @@
+/* linux/include/linux/clockchips.h
+ *
+ * This file contains the structure definitions for clockchips.
+ *
+ * If you are not a clockchip, or the time of day code, you should
+ * not be including this file!
+ */
+#ifndef _LINUX_CLOCKCHIPS_H
+#define _LINUX_CLOCKCHIPS_H
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+#include <linux/clocksource.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
+#include <linux/notifier.h>
+
+struct clock_event_device;
+
+/* Clock event mode commands */
+enum clock_event_mode {
+ CLOCK_EVT_MODE_UNUSED = 0,
+ CLOCK_EVT_MODE_SHUTDOWN,
+ CLOCK_EVT_MODE_PERIODIC,
+ CLOCK_EVT_MODE_ONESHOT,
+};
+
+/* Clock event notification values */
+enum clock_event_notifiers {
+ CLOCK_EVT_NOTIFY_ADD,
+ CLOCK_EVT_NOTIFY_BROADCAST_ON,
+ CLOCK_EVT_NOTIFY_BROADCAST_OFF,
+ CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+ CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+ CLOCK_EVT_NOTIFY_SUSPEND,
+ CLOCK_EVT_NOTIFY_RESUME,
+ CLOCK_EVT_NOTIFY_CPU_DEAD,
+};
+
+/*
+ * Clock event features
+ */
+#define CLOCK_EVT_FEAT_PERIODIC 0x000001
+#define CLOCK_EVT_FEAT_ONESHOT 0x000002
+/*
+ * x86(64) specific misfeatures:
+ *
+ * - Clockevent source stops in C3 State and needs broadcast support.
+ * - Local APIC timer is used as a dummy device.
+ */
+#define CLOCK_EVT_FEAT_C3STOP 0x000004
+#define CLOCK_EVT_FEAT_DUMMY 0x000008
+
+/**
+ * struct clock_event_device - clock event device descriptor
+ * @name: ptr to clock event name
+ * @features: features of the clock event device (see feature bits above)
+ * @max_delta_ns: maximum delta value in ns
+ * @min_delta_ns: minimum delta value in ns
+ * @mult: nanosecond to cycles multiplier
+ * @shift: nanoseconds to cycles divisor (power of two)
+ * @rating: variable to rate clock event devices
+ * @irq: irq number (only for non cpu local devices)
+ * @cpumask: cpumask to indicate for which cpus this device works
+ * @set_next_event: set next event
+ * @set_mode: set mode function
+ * @event_handler: Assigned by the framework to be called by the low
+ * level handler of the event source
+ * @broadcast: function to broadcast events
+ * @list: list head for the management code
+ * @mode: operating mode assigned by the management code
+ * @next_event: local storage for the next event in oneshot mode
+ */
+struct clock_event_device {
+ const char *name;
+ unsigned int features;
+ unsigned long max_delta_ns;
+ unsigned long min_delta_ns;
+ unsigned long mult;
+ int shift;
+ int rating;
+ int irq;
+ cpumask_t cpumask;
+ int (*set_next_event)(unsigned long evt,
+ struct clock_event_device *);
+ void (*set_mode)(enum clock_event_mode mode,
+ struct clock_event_device *);
+ void (*event_handler)(struct clock_event_device *);
+ void (*broadcast)(cpumask_t mask);
+ struct list_head list;
+ enum clock_event_mode mode;
+ ktime_t next_event;
+};
+
+/*
+ * Calculate a multiplication factor for scaled math, which is used to convert
+ * nanoseconds based values to clock ticks:
+ *
+ * clock_ticks = (nanoseconds * factor) >> shift.
+ *
+ * div_sc is the rearranged equation to calculate a factor from a given clock
+ * ticks / nanoseconds ratio:
+ *
+ * factor = (clock_ticks << shift) / nanoseconds
+ */
+static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
+ int shift)
+{
+ uint64_t tmp = ((uint64_t)ticks) << shift;
+
+ do_div(tmp, nsec);
+ return (unsigned long) tmp;
+}
+
+/* Clock event layer functions */
+extern unsigned long clockevent_delta2ns(unsigned long latch,
+ struct clock_event_device *evt);
+extern void clockevents_register_device(struct clock_event_device *dev);
+
+extern void clockevents_exchange_device(struct clock_event_device *old,
+ struct clock_event_device *new);
+extern
+struct clock_event_device *clockevents_request_device(unsigned int features,
+ cpumask_t cpumask);
+extern void clockevents_release_device(struct clock_event_device *dev);
+extern void clockevents_set_mode(struct clock_event_device *dev,
+ enum clock_event_mode mode);
+extern int clockevents_register_notifier(struct notifier_block *nb);
+extern void clockevents_unregister_notifier(struct notifier_block *nb);
+extern int clockevents_program_event(struct clock_event_device *dev,
+ ktime_t expires, ktime_t now);
+
+extern void clockevents_notify(unsigned long reason, void *arg);
+
+#else
+
+static inline void clockevents_resume_events(void) { }
+#define clockevents_notify(reason, arg) do { } while (0)
+
+#endif
+
+#endif
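
As a worked example of the scaled math above: a 1193182 Hz PIT with shift 32 gets mult = div_sc(1193182, NSEC_PER_SEC, 32), roughly 5.12 million, so clock_ticks = (ns * mult) >> 32. A hedged registration sketch along those lines; pit_set_mode and pit_next_event are assumed helpers and the rating is illustrative:

/* Sketch: filling in and registering a PIT-style clock event device. */
static struct clock_event_device pit_ce = {
	.name		= "pit",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.rating		= 110,
	.set_mode	= pit_set_mode,		/* assumed helper */
	.set_next_event	= pit_next_event,	/* assumed helper */
};

static void __init pit_ce_init(void)
{
	pit_ce.mult	    = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
	pit_ce.max_delta_ns = clockevent_delta2ns(0x7FFF, &pit_ce);
	pit_ce.min_delta_ns = clockevent_delta2ns(0xF, &pit_ce);
	pit_ce.cpumask	    = cpumask_of_cpu(0);
	clockevents_register_device(&pit_ce);
}
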
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 1622d23a8dc3..daa4940cc0f1 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -12,11 +12,13 @@
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
+#include <linux/timer.h>
#include <asm/div64.h>
#include <asm/io.h>
/* clocksource cycle base type */
typedef u64 cycle_t;
+struct clocksource;
/**
* struct clocksource - hardware abstraction for a free running counter
@@ -44,8 +46,8 @@ typedef u64 cycle_t;
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
- * @update_callback: called when safe to alter clocksource values
- * @is_continuous: defines if clocksource is free-running.
+ * @flags: flags describing special properties
+ * @vread: vsyscall based read
* @cycle_interval: Used internally by timekeeping core, please ignore.
* @xtime_interval: Used internally by timekeeping core, please ignore.
*/
@@ -57,15 +59,30 @@ struct clocksource {
cycle_t mask;
u32 mult;
u32 shift;
- int (*update_callback)(void);
- int is_continuous;
+ unsigned long flags;
+ cycle_t (*vread)(void);
/* timekeeping specific data, ignore */
cycle_t cycle_last, cycle_interval;
u64 xtime_nsec, xtime_interval;
s64 error;
+
+#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+ /* Watchdog related data, used by the framework */
+ struct list_head wd_list;
+ cycle_t wd_last;
+#endif
};
+/*
+ * Clock source flag bits:
+ */
+#define CLOCK_SOURCE_IS_CONTINUOUS 0x01
+#define CLOCK_SOURCE_MUST_VERIFY 0x02
+
+#define CLOCK_SOURCE_WATCHDOG 0x10
+#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
+
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1)
@@ -178,8 +195,16 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
/* used to install a new clocksource */
-int clocksource_register(struct clocksource*);
-void clocksource_reselect(void);
-struct clocksource* clocksource_get_next(void);
+extern int clocksource_register(struct clocksource*);
+extern struct clocksource* clocksource_get_next(void);
+extern void clocksource_change_rating(struct clocksource *cs, int rating);
+
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
+#else
+static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
+{
+}
+#endif
#endif /* _LINUX_CLOCKSOURCE_H */
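
A hedged sketch of a clocksource definition under the new flags scheme, replacing the removed is_continuous/update_callback fields; the MMIO base and the 10 MHz rate are assumptions:

/* Sketch: exporting a free-running 32-bit MMIO counter as a clocksource. */
static void __iomem *my_counter_mmio;	/* assumed to be ioremap()ed at init */

static cycle_t read_my_counter(void)
{
	return (cycle_t)readl(my_counter_mmio);
}

static struct clocksource clocksource_example = {
	.name	= "example",
	.rating	= 250,
	.read	= read_my_counter,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_clocksource_init(void)
{
	clocksource_example.mult =
		clocksource_hz2mult(10000000, clocksource_example.shift);
	return clocksource_register(&clocksource_example);
}
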
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7f008f6bfdc3..0899e2cdcdd1 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -84,9 +84,6 @@ struct cpufreq_policy {
unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
- struct mutex lock; /* CPU ->setpolicy or ->target may
- only be called once a time */
-
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
@@ -172,11 +169,16 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
-extern int cpufreq_driver_getavg(struct cpufreq_policy *policy);
+extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int lock_policy_rwsem_read(int cpu);
+int lock_policy_rwsem_write(int cpu);
+void unlock_policy_rwsem_read(int cpu);
+void unlock_policy_rwsem_write(int cpu);
+
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 612472aaa79c..7803014f3a11 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -106,7 +106,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
* always balanced, so the interrupted value of ->hardirq_context
* will always be restored.
*/
-#define irq_enter() \
+#define __irq_enter() \
do { \
account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \
@@ -114,6 +114,11 @@ static inline void account_system_vtime(struct task_struct *tsk)
} while (0)
/*
+ * Enter irq context (on NO_HZ, update jiffies):
+ */
+extern void irq_enter(void);
+
+/*
* Exit irq context without processing softirqs:
*/
#define __irq_exit() \
@@ -128,7 +133,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
*/
extern void irq_exit(void);
-#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0)
+#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
#endif /* LINUX_HARDIRQ_H */
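
The point of the split is that NO_HZ can hook a jiffies catch-up into interrupt entry while NMIs keep the raw __irq_enter() path; a plausible sketch of the out-of-line irq_enter() under that reading:

/* Sketch: update jiffies when an interrupt wakes an idle (tickless) CPU. */
void irq_enter(void)
{
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(smp_processor_id()))
		tick_nohz_update_jiffies();
#endif
}
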
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fca93025ab51..37f9279192a9 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -21,22 +21,72 @@
#include <linux/list.h>
#include <linux/wait.h>
+struct hrtimer_clock_base;
+struct hrtimer_cpu_base;
+
/*
* Mode arguments of xxx_hrtimer functions:
*/
enum hrtimer_mode {
- HRTIMER_ABS, /* Time value is absolute */
- HRTIMER_REL, /* Time value is relative to now */
+ HRTIMER_MODE_ABS, /* Time value is absolute */
+ HRTIMER_MODE_REL, /* Time value is relative to now */
};
+/*
+ * Return values for the callback function
+ */
enum hrtimer_restart {
- HRTIMER_NORESTART,
- HRTIMER_RESTART,
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
};
-#define HRTIMER_INACTIVE ((void *)1UL)
+/*
+ * hrtimer callback modes:
+ *
+ * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
+ * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
+ * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
+ * does not restart the timer
+ * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in softirq context
+ * Special mode for tick emulation
+ */
+enum hrtimer_cb_mode {
+ HRTIMER_CB_SOFTIRQ,
+ HRTIMER_CB_IRQSAFE,
+ HRTIMER_CB_IRQSAFE_NO_RESTART,
+ HRTIMER_CB_IRQSAFE_NO_SOFTIRQ,
+};
-struct hrtimer_base;
+/*
+ * Values to track state of the timer
+ *
+ * Possible states:
+ *
+ * 0x00 inactive
+ * 0x01 enqueued into rbtree
+ * 0x02 callback function running
+ * 0x04 callback pending (high resolution mode)
+ *
+ * Special case:
+ * 0x03 callback function running and enqueued
+ * (was requeued on another CPU)
+ * The "callback function running and enqueued" status is only possible on
+ * SMP. It happens for example when a posix timer expired and the callback
+ * queued a signal. Between dropping the lock which protects the posix timer
+ * and reacquiring the base lock of the hrtimer, another CPU can deliver the
+ * signal and rearm the timer. We have to preserve the callback running state,
+ * as otherwise the timer could be removed before the softirq code finishes
+ * the handling of the timer.
+ *
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state to
+ * preserve the HRTIMER_STATE_CALLBACK bit in the above scenario.
+ *
+ * All state transitions are protected by cpu_base->lock.
+ */
+#define HRTIMER_STATE_INACTIVE 0x00
+#define HRTIMER_STATE_ENQUEUED 0x01
+#define HRTIMER_STATE_CALLBACK 0x02
+#define HRTIMER_STATE_PENDING 0x04
/**
* struct hrtimer - the basic hrtimer structure
@@ -46,14 +96,34 @@ struct hrtimer_base;
* which the timer is based.
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @cb_mode: high resolution timer feature to select the callback execution
+ * mode
+ * @cb_entry: list head to enqueue an expired timer into the callback list
+ * @start_site: timer statistics field to store the site where the timer
+ * was started
+ * @start_comm: timer statistics field to store the name of the process which
+ * started the timer
+ * @start_pid: timer statistics field to store the pid of the task which
+ * started the timer
*
- * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
+ * The hrtimer structure must be initialized by hrtimer_init()
*/
struct hrtimer {
- struct rb_node node;
- ktime_t expires;
- int (*function)(struct hrtimer *);
- struct hrtimer_base *base;
+ struct rb_node node;
+ ktime_t expires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ enum hrtimer_cb_mode cb_mode;
+ struct list_head cb_entry;
+#endif
+#ifdef CONFIG_TIMER_STATS
+ void *start_site;
+ char start_comm[16];
+ int start_pid;
+#endif
};
/**
@@ -70,37 +140,114 @@ struct hrtimer_sleeper {
/**
 * struct hrtimer_clock_base - the timer base for a specific clock
- * @index: clock type index for per_cpu support when moving a timer
- * to a base on another cpu.
- * @lock: lock protecting the base and associated timers
+ * @cpu_base: per cpu clock base this clock base belongs to
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
* @active: red black tree root node for the active timers
* @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
* @get_softirq_time: function to retrieve the current time from the softirq
- * @curr_timer: the timer which is executing a callback right now
* @softirq_time: the time when running the hrtimer queue in the softirq
- * @lock_key: the lock_class_key for use with lockdep
+ * @offset: offset of this clock to the monotonic base
+ * @reprogram: function to reprogram the timer event
*/
-struct hrtimer_base {
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
clockid_t index;
- spinlock_t lock;
struct rb_root active;
struct rb_node *first;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t (*get_softirq_time)(void);
- struct hrtimer *curr_timer;
ktime_t softirq_time;
- struct lock_class_key lock_key;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t offset;
+ int (*reprogram)(struct hrtimer *t,
+ struct hrtimer_clock_base *b,
+ ktime_t n);
+#endif
+};
+
+#define HRTIMER_MAX_CLOCK_BASES 2
+
+/*
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @lock_key: the lock_class_key for use with lockdep
+ * @clock_base: array of clock bases for this cpu
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @hres_active: State of high resolution mode
+ * @cb_pending: Expired timers are moved from the rbtree to this
+ * list in the timer interrupt. The list is processed
+ * in the softirq.
+ * @nr_events: Total number of timer interrupt events
+ */
+struct hrtimer_cpu_base {
+ spinlock_t lock;
+ struct lock_class_key lock_key;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+#ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int hres_active;
+ struct list_head cb_pending;
+ unsigned long nr_events;
+#endif
};
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void clock_was_set(void);
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
+/*
+ * In high resolution mode the time reference must be read accurately
+ */
+static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+{
+ return timer->base->get_time();
+}
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * these resolution values.
+ */
+# define KTIME_HIGH_RES (ktime_t) { .tv64 = 1 }
+# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
+#else
+
+# define KTIME_MONOTONIC_RES KTIME_LOW_RES
+
/*
* clock_was_set() is a NOP for non- high-resolution systems. The
* time-sorted order guarantees that a timer does not expire early and
* is expired in the next softirq when the clock was advanced.
*/
-#define clock_was_set() do { } while (0)
+static inline void clock_was_set(void) { }
+
+/*
+ * In non high resolution mode the time reference is taken from
+ * the base softirq time variable.
+ */
+static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+{
+ return timer->base->softirq_time;
+}
+
+#endif
+
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_real(void);
/* Exported timer functions: */
@@ -114,19 +261,33 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
-#define hrtimer_restart(timer) hrtimer_start((timer), (timer)->expires, HRTIMER_ABS)
+static inline int hrtimer_restart(struct hrtimer *timer)
+{
+ return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-#ifdef CONFIG_NO_IDLE_HZ
extern ktime_t hrtimer_get_next_event(void);
-#endif
+/*
+ * A timer is active, when it is enqueued into the rbtree or the callback
+ * function is running.
+ */
static inline int hrtimer_active(const struct hrtimer *timer)
{
- return rb_parent(&timer->node) != &timer->node;
+ return timer->state != HRTIMER_STATE_INACTIVE;
+}
+
+/*
+ * Helper function to check, whether the timer is on one of the queues
+ */
+static inline int hrtimer_is_queued(struct hrtimer *timer)
+{
+ return timer->state &
+ (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
}
/* Forward a hrtimer so it expires after now: */
@@ -149,4 +310,53 @@ extern void hrtimer_run_queues(void);
/* Bootup initialization: */
extern void __init hrtimers_init(void);
+#if BITS_PER_LONG < 64
+extern unsigned long ktime_divns(const ktime_t kt, s64 div);
+#else /* BITS_PER_LONG < 64 */
+# define ktime_divns(kt, div) (unsigned long)((kt).tv64 / (div))
+#endif
+
+/* Show pending timers: */
+extern void sysrq_timer_list_show(void);
+
+/*
+ * Timer-statistics info:
+ */
+#ifdef CONFIG_TIMER_STATS
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char * comm);
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm);
+}
+
+extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
+ void *addr);
+
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
+{
+ __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+ timer->start_site = NULL;
+}
+#else
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+}
+
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
+{
+}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+}
+#endif
+
#endif
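
Pulling the renamed pieces together, a minimal usage sketch: callbacks now return enum hrtimer_restart and the modes are spelled HRTIMER_MODE_*; the timer and function names are illustrative:

/* Sketch: arm a one-shot 100ms hrtimer with the reworked API. */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	/* hrtimer_cb_get_time(t) returns the proper time base here */
	return HRTIMER_NORESTART;
}

static void demo_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fn;
	hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
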
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5a8ba0b8ccba..e5ea1411050b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -42,6 +42,8 @@
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
+ * IRQF_PERCPU - Interrupt is per cpu
+ * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -49,6 +51,7 @@
#define IRQF_PROBE_SHARED 0x00000100
#define IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
+#define IRQF_NOBALANCING 0x00000800
/*
* Migration helpers. Scheduled for removal in 1/2007
@@ -239,6 +242,9 @@ enum
BLOCK_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
+#ifdef CONFIG_HIGH_RES_TIMERS
+ HRTIMER_SOFTIRQ,
+#endif
};
/* softirq mask and active fields moved to irq_cpustat_t in
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 5504b671357f..1939d42c21d2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -31,7 +31,7 @@ typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
/*
* IRQ line status.
*
- * Bits 0-16 are reserved for the IRQF_* bits in linux/interrupt.h
+ * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
*
* IRQ types
*/
@@ -45,28 +45,30 @@ typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_TYPE_PROBE 0x00000010 /* Probing in progress */
/* Internal flags */
-#define IRQ_INPROGRESS 0x00010000 /* IRQ handler active - do not enter! */
-#define IRQ_DISABLED 0x00020000 /* IRQ disabled - do not enter! */
-#define IRQ_PENDING 0x00040000 /* IRQ pending - replay on enable */
-#define IRQ_REPLAY 0x00080000 /* IRQ has been replayed but not acked yet */
-#define IRQ_AUTODETECT 0x00100000 /* IRQ is being autodetected */
-#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */
-#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */
-#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */
-#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
+#define IRQ_INPROGRESS 0x00000100 /* IRQ handler active - do not enter! */
+#define IRQ_DISABLED 0x00000200 /* IRQ disabled - do not enter! */
+#define IRQ_PENDING 0x00000400 /* IRQ pending - replay on enable */
+#define IRQ_REPLAY 0x00000800 /* IRQ has been replayed but not acked yet */
+#define IRQ_AUTODETECT 0x00001000 /* IRQ is being autodetected */
+#define IRQ_WAITING 0x00002000 /* IRQ not yet seen - for autodetection */
+#define IRQ_LEVEL 0x00004000 /* IRQ level triggered */
+#define IRQ_MASKED 0x00008000 /* IRQ masked - shouldn't be seen again */
+#define IRQ_PER_CPU 0x00010000 /* IRQ is per CPU */
+#define IRQ_NOPROBE 0x00020000 /* IRQ is not valid for probing */
+#define IRQ_NOREQUEST 0x00040000 /* IRQ cannot be requested */
+#define IRQ_NOAUTOEN 0x00080000 /* IRQ will not be enabled on request irq */
+#define IRQ_WAKEUP 0x00100000 /* IRQ triggers system wakeup */
+#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
+#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
+
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
+# define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
+# define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING
#endif
-#define IRQ_NOPROBE 0x02000000 /* IRQ is not valid for probing */
-#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */
-#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
-#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
-#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
-#define IRQ_MOVE_PENDING 0x40000000 /* need to re-target IRQ destination */
-
struct proc_dir_entry;
struct msi_desc;
@@ -127,6 +129,7 @@ struct irq_chip {
*
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @chip: low level interrupt hardware access
+ * @msi_desc: MSI descriptor
* @handler_data: per-IRQ data for the irq_chip methods
* @chip_data: platform-specific per-chip private data for the chip
* methods, to allow shared chip implementations
@@ -235,11 +238,21 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
#endif /* CONFIG_GENERIC_PENDING_IRQ */
+extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+
#else /* CONFIG_SMP */
#define move_native_irq(x)
#define move_masked_irq(x)
+static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
+
#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQBALANCE
@@ -261,6 +274,11 @@ static inline int select_smp_affinity(unsigned int irq)
extern int no_irq_affinity;
+static inline int irq_balancing_disabled(unsigned int irq)
+{
+ return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
+}
+
/* Handle irq action chains: */
extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
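
A hedged sketch of how the new affinity helpers compose; the CPU choice and policy are illustrative:

/* Sketch: pin an interrupt to CPU 0 unless balancing is disallowed. */
static int pin_irq_to_cpu0(unsigned int irq)
{
	if (!irq_can_set_affinity(irq) || irq_balancing_disabled(irq))
		return -EINVAL;
	return irq_set_affinity(irq, cpumask_of_cpu(0));
}
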
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 0ec6e28bccd2..c080f61fb024 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -142,13 +142,13 @@ static inline u64 get_jiffies_64(void)
*
* And some not so obvious.
*
- * Note that we don't want to return MAX_LONG, because
+ * Note that we don't want to return LONG_MAX, because
* for various timeout reasons we often end up having
* to wait "jiffies+1" in order to guarantee that we wait
* at _least_ "jiffies" - so "jiffies+1" had better still
* be positive.
*/
-#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
+#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
/*
* We want to do realistic conversions of time so we need to use the same
@@ -259,207 +259,23 @@ static inline u64 get_jiffies_64(void)
#endif
/*
- * Convert jiffies to milliseconds and back.
- *
- * Avoid unnecessary multiplications/divisions in the
- * two most common HZ cases:
- */
-static inline unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (MSEC_PER_SEC / HZ) * j;
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
-#else
- return (j * MSEC_PER_SEC) / HZ;
-#endif
-}
-
-static inline unsigned int jiffies_to_usecs(const unsigned long j)
-{
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
- return (USEC_PER_SEC / HZ) * j;
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
- return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
-#else
- return (j * USEC_PER_SEC) / HZ;
-#endif
-}
-
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
-{
- if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return m * (HZ / MSEC_PER_SEC);
-#else
- return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
-#endif
-}
-
-static inline unsigned long usecs_to_jiffies(const unsigned int u)
-{
- if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
- return MAX_JIFFY_OFFSET;
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
- return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
- return u * (HZ / USEC_PER_SEC);
-#else
- return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC;
-#endif
-}
-
-/*
- * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
- * that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries. I.e. the line:
- * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
- *
- * Rather, we just shift the bits off the right.
- *
- * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
- * value to a scaled second value.
- */
-static __inline__ unsigned long
-timespec_to_jiffies(const struct timespec *value)
-{
- unsigned long sec = value->tv_sec;
- long nsec = value->tv_nsec + TICK_NSEC - 1;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- nsec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)nsec * NSEC_CONVERSION) >>
- (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-
-}
-
-static __inline__ void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
-}
-
-/* Same for "timeval"
- *
- * Well, almost. The problem here is that the real system resolution is
- * in nanoseconds and the value being converted is in micro seconds.
- * Also for some machines (those that use HZ = 1024, in-particular),
- * there is a LARGE error in the tick size in microseconds.
-
- * The solution we use is to do the rounding AFTER we convert the
- * microsecond part. Thus the USEC_ROUND, the bits to be shifted off.
- * Instruction wise, this should cost only an additional add with carry
- * instruction above the way it was done above.
- */
-static __inline__ unsigned long
-timeval_to_jiffies(const struct timeval *value)
-{
- unsigned long sec = value->tv_sec;
- long usec = value->tv_usec;
-
- if (sec >= MAX_SEC_IN_JIFFIES){
- sec = MAX_SEC_IN_JIFFIES;
- usec = 0;
- }
- return (((u64)sec * SEC_CONVERSION) +
- (((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
- (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-
-static __inline__ void
-jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
-{
- /*
- * Convert jiffies to nanoseconds and separate with
- * one divide.
- */
- u64 nsec = (u64)jiffies * TICK_NSEC;
- long tv_usec;
-
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
- tv_usec /= NSEC_PER_USEC;
- value->tv_usec = tv_usec;
-}
-
-/*
- * Convert jiffies/jiffies_64 to clock_t and back.
+ * Convert various time units to each other:
*/
-static inline clock_t jiffies_to_clock_t(long x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- return x / (HZ / USER_HZ);
-#else
- u64 tmp = (u64)x * TICK_NSEC;
- do_div(tmp, (NSEC_PER_SEC / USER_HZ));
- return (long)tmp;
-#endif
-}
-
-static inline unsigned long clock_t_to_jiffies(unsigned long x)
-{
-#if (HZ % USER_HZ)==0
- if (x >= ~0UL / (HZ / USER_HZ))
- return ~0UL;
- return x * (HZ / USER_HZ);
-#else
- u64 jif;
-
- /* Don't worry about loss of precision here .. */
- if (x >= ~0UL / HZ * USER_HZ)
- return ~0UL;
-
- /* .. but do try to contain it here */
- jif = x * (u64) HZ;
- do_div(jif, USER_HZ);
- return jif;
-#endif
-}
-
-static inline u64 jiffies_64_to_clock_t(u64 x)
-{
-#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
- do_div(x, HZ / USER_HZ);
-#else
- /*
- * There are better ways that don't overflow early,
- * but even this doesn't overflow in hundreds of years
- * in 64 bits, so..
- */
- x *= TICK_NSEC;
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#endif
- return x;
-}
-
-static inline u64 nsec_to_clock_t(u64 x)
-{
-#if (NSEC_PER_SEC % USER_HZ) == 0
- do_div(x, (NSEC_PER_SEC / USER_HZ));
-#elif (USER_HZ % 512) == 0
- x *= USER_HZ/512;
- do_div(x, (NSEC_PER_SEC / 512));
-#else
- /*
- * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
- * overflow after 64.99 years.
- * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
- */
- x *= 9;
- do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2))
- / USER_HZ));
-#endif
- return x;
-}
+extern unsigned int jiffies_to_msecs(const unsigned long j);
+extern unsigned int jiffies_to_usecs(const unsigned long j);
+extern unsigned long msecs_to_jiffies(const unsigned int m);
+extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long timespec_to_jiffies(const struct timespec *value);
+extern void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value);
+extern unsigned long timeval_to_jiffies(const struct timeval *value);
+extern void jiffies_to_timeval(const unsigned long jiffies,
+ struct timeval *value);
+extern clock_t jiffies_to_clock_t(long x);
+extern unsigned long clock_t_to_jiffies(unsigned long x);
+extern u64 jiffies_64_to_clock_t(u64 x);
+extern u64 nsec_to_clock_t(u64 x);
+
+#define TIMESTAMP_SIZE 30
#endif
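
The conversions are now out of line, but the arithmetic is unchanged. As a worked example with HZ=250: jiffies_to_msecs(j) takes the exact-division fast path, (1000/250)*j = 4*j, and msecs_to_jiffies(10) rounds up, (10 + 4 - 1)/4 = 3 jiffies, so a timeout never fires early. A sketch of that fast path, lifted from the removed inline code:

/* Sketch: the HZ <= 1000, exact-division case of the helpers above. */
static inline unsigned int demo_jiffies_to_msecs(unsigned long j)
{
	return (MSEC_PER_SEC / HZ) * j;			/* 4 * j at HZ=250 */
}

static inline unsigned long demo_msecs_to_jiffies(unsigned int m)
{
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}
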
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 7444a6326231..c68c7ac6b232 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -261,8 +261,7 @@ static inline s64 ktime_to_ns(const ktime_t kt)
* idea of the (in)accuracy of timers. Timer values are rounded up to
 * these resolution values.
*/
-#define KTIME_REALTIME_RES (ktime_t){ .tv64 = TICK_NSEC }
-#define KTIME_MONOTONIC_RES (ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_LOW_RES (ktime_t){ .tv64 = TICK_NSEC }
/* Get the monotonic time in timespec format: */
extern void ktime_get_ts(struct timespec *ts);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index db05182ca0e8..1be5be88debe 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -105,12 +105,11 @@ struct nfs4_ace {
uint32_t access_mask;
int whotype;
uid_t who;
- struct list_head l_ace;
};
struct nfs4_acl {
uint32_t naces;
- struct list_head ace_head;
+ struct nfs4_ace aces[0];
};
typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
index 22aff4d01f20..409b6e02f337 100644
--- a/include/linux/nfs4_acl.h
+++ b/include/linux/nfs4_acl.h
@@ -39,9 +39,12 @@
#include <linux/posix_acl.h>
-struct nfs4_acl *nfs4_acl_new(void);
-void nfs4_acl_free(struct nfs4_acl *);
-int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
+/* Maximum ACL we'll accept from client; chosen (somewhat arbitrarily) to
+ * fit in a page: */
+#define NFS4_ACL_MAX 170
+
+struct nfs4_acl *nfs4_acl_new(int);
+void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
int nfs4_acl_get_whotype(char *, u32);
int nfs4_acl_write_who(int who, char *p);
int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
diff --git a/include/linux/tick.h b/include/linux/tick.h
new file mode 100644
index 000000000000..9a7252e089b9
--- /dev/null
+++ b/include/linux/tick.h
@@ -0,0 +1,109 @@
+/* linux/include/linux/tick.h
+ *
+ * This file contains the structure definitions for tick-related functions
+ *
+ */
+#ifndef _LINUX_TICK_H
+#define _LINUX_TICK_H
+
+#include <linux/clockchips.h>
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+enum tick_device_mode {
+ TICKDEV_MODE_PERIODIC,
+ TICKDEV_MODE_ONESHOT,
+};
+
+struct tick_device {
+ struct clock_event_device *evtdev;
+ enum tick_device_mode mode;
+};
+
+enum tick_nohz_mode {
+ NOHZ_MODE_INACTIVE,
+ NOHZ_MODE_LOWRES,
+ NOHZ_MODE_HIGHRES,
+};
+
+/**
+ * struct tick_sched - sched tick emulation and no idle tick control/stats
+ * @sched_timer: hrtimer to schedule the periodic tick in high
+ * resolution mode
+ * @idle_tick: Store the last idle tick expiry time when the tick
+ * timer is modified for idle sleeps. This is necessary
+ * to resume the tick timer operation in the timeline
+ * when the CPU returns from idle
+ * @tick_stopped: Indicator that the idle tick has been stopped
+ * @idle_jiffies: jiffies at the entry to idle for idle time accounting
+ * @idle_calls: Total number of idle calls
+ * @idle_sleeps: Number of idle calls, where the sched tick was stopped
+ * @idle_entrytime: Time when the idle call was entered
+ * @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
+ */
+struct tick_sched {
+ struct hrtimer sched_timer;
+ unsigned long check_clocks;
+ enum tick_nohz_mode nohz_mode;
+ ktime_t idle_tick;
+ int tick_stopped;
+ unsigned long idle_jiffies;
+ unsigned long idle_calls;
+ unsigned long idle_sleeps;
+ ktime_t idle_entrytime;
+ ktime_t idle_sleeptime;
+ unsigned long last_jiffies;
+ unsigned long next_jiffies;
+ ktime_t idle_expires;
+};
+
+extern void __init tick_init(void);
+extern int tick_is_oneshot_available(void);
+extern struct tick_device *tick_get_device(int cpu);
+
+# ifdef CONFIG_HIGH_RES_TIMERS
+extern int tick_init_highres(void);
+extern int tick_program_event(ktime_t expires, int force);
+extern void tick_setup_sched_timer(void);
+extern void tick_cancel_sched_timer(int cpu);
+# else
+static inline void tick_cancel_sched_timer(int cpu) { }
+# endif /* HIGHRES */
+
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern struct tick_device *tick_get_broadcast_device(void);
+extern cpumask_t *tick_get_broadcast_mask(void);
+
+# ifdef CONFIG_TICK_ONESHOT
+extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
+# endif
+
+# endif /* BROADCAST */
+
+# ifdef CONFIG_TICK_ONESHOT
+extern void tick_clock_notify(void);
+extern int tick_check_oneshot_change(int allow_nohz);
+extern struct tick_sched *tick_get_tick_sched(int cpu);
+# else
+static inline void tick_clock_notify(void) { }
+static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+# endif
+
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+static inline void tick_init(void) { }
+static inline void tick_cancel_sched_timer(int cpu) { }
+static inline void tick_clock_notify(void) { }
+static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+# ifdef CONFIG_NO_HZ
+extern void tick_nohz_stop_sched_tick(void);
+extern void tick_nohz_restart_sched_tick(void);
+extern void tick_nohz_update_jiffies(void);
+# else
+static inline void tick_nohz_stop_sched_tick(void) { }
+static inline void tick_nohz_restart_sched_tick(void) { }
+static inline void tick_nohz_update_jiffies(void) { }
+# endif /* !NO_HZ */
+
+#endif
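
A hedged sketch of where the NO_HZ entry points sit in an arch idle loop; the loop body and halt primitive are illustrative:

/* Sketch: dynticks bracketing around an idle loop. */
static void demo_cpu_idle(void)
{
	while (1) {
		tick_nohz_stop_sched_tick();	/* park the periodic tick */
		while (!need_resched())
			safe_halt();		/* assumed arch idle primitive */
		tick_nohz_restart_sched_tick();	/* catch up jiffies, rearm tick */
		schedule();
	}
}
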
diff --git a/include/linux/time.h b/include/linux/time.h
index eceb1a59b078..8ea8dea713c7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -92,6 +92,7 @@ extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock __attribute__((weak));
+extern unsigned long read_persistent_clock(void);
void timekeeping_init(void);
static inline unsigned long get_seconds(void)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index fb5edaaf0ebd..719113b652dd 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -2,6 +2,7 @@
#define _LINUX_TIMER_H
#include <linux/list.h>
+#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
@@ -15,6 +16,11 @@ struct timer_list {
unsigned long data;
struct tvec_t_base_s *base;
+#ifdef CONFIG_TIMER_STATS
+ void *start_site;
+ char start_comm[16];
+ int start_pid;
+#endif
};
extern struct tvec_t_base_s boot_tvec_bases;
@@ -61,7 +67,65 @@ extern int del_timer(struct timer_list * timer);
extern int __mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
+/*
+ * Return when the next timer-wheel timeout occurs (in absolute jiffies);
+ * locks the timer base:
+ */
extern unsigned long next_timer_interrupt(void);
+/*
+ * Return when the next timer-wheel timeout occurs (in absolute jiffies);
+ * locks the timer base and compares against the given jiffy value.
+ */
+extern unsigned long get_next_timer_interrupt(unsigned long now);
+
+/*
+ * Timer-statistics info:
+ */
+#ifdef CONFIG_TIMER_STATS
+
+extern void init_timer_stats(void);
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char * comm);
+
+static inline void timer_stats_account_timer(struct timer_list *timer)
+{
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm);
+}
+
+extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
+ void *addr);
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+ __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+ timer->start_site = NULL;
+}
+#else
+static inline void init_timer_stats(void)
+{
+}
+
+static inline void timer_stats_account_timer(struct timer_list *timer)
+{
+}
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+}
+#endif
+
+extern void delayed_work_timer_fn(unsigned long __data);
/**
* add_timer - start a timer
@@ -96,7 +160,7 @@ static inline void add_timer(struct timer_list *timer)
extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
-extern int it_real_fn(struct hrtimer *);
+extern enum hrtimer_restart it_real_fn(struct hrtimer *);
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
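
For the timer-statistics hooks, a hedged sketch of how they pair over a timer's life cycle; in the real core these calls live inside the timer code itself, so the wrapper below is purely illustrative:

/* Sketch: set_start_info on arming, account on expiry, clear on removal. */
static void demo_timer_stats_lifecycle(struct timer_list *t)
{
	timer_stats_timer_set_start_info(t);	/* record the arming call site */
	/* ... the timer expires and its handler runs ... */
	timer_stats_account_timer(t);		/* fold the event into the stats */
	timer_stats_timer_clear_start_info(t);	/* forget the site on removal */
}
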
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9a24e500c311..da929dbbea2a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -286,6 +286,13 @@ static inline void time_interpolator_update(long delta_nsec)
#define TICK_LENGTH_SHIFT 32
+#ifdef CONFIG_NO_HZ
+#define NTP_INTERVAL_FREQ (2)
+#else
+#define NTP_INTERVAL_FREQ (HZ)
+#endif
+#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
+
/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
extern u64 current_tick_length(void);
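
Worked numbers for the new NTP interval: with CONFIG_NO_HZ, NTP_INTERVAL_FREQ is 2, so NTP_INTERVAL_LENGTH = NSEC_PER_SEC / 2 = 500,000,000 ns (500 ms); without it, HZ=1000 gives 1,000,000 ns (1 ms) per NTP accumulation interval.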