Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/cpm1.h            |  2
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h     | 24
-rw-r--r--  arch/powerpc/include/asm/highmem.h         | 57
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h          | 20
-rw-r--r--  arch/powerpc/include/asm/perf_counter.h    |  2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h  |  3
-rw-r--r--  arch/powerpc/include/asm/rtas.h            |  5
7 files changed, 40 insertions(+), 73 deletions(-)
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff798744c1d..7685ffde8821 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
#define CICR_IEN ((uint)0x00000080) /* Int. enable */
#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
-#define IMAP_ADDR (get_immrbase())
-
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
#define CPM_PIN_PRIMARY 0
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887c3c0c..b44aaabdd1a6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
size, direction);
}
@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
0, size, direction);
}
@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
offset, size, direction);
}
@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
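
The hunks above make the sync_* hooks of dma_mapping_ops optional: a backend that leaves them NULL (for instance a fully cache-coherent platform with nothing to flush) now gets a no-op instead of a NULL-pointer dereference. A minimal, hypothetical driver fragment using the unchanged helper API (example_rx_complete() and process_buffer() are made-up names, not from this commit):

/* hypothetical RX completion handler; the helpers it calls are the ones
 * guarded by the new checks above */
static void example_rx_complete(struct device *dev, dma_addr_t handle,
				void *buf, size_t len)
{
	/* make the device's writes visible to the CPU before reading */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	process_buffer(buf, len);		/* made-up consumer */

	/* hand the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
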
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f4324f..a74c4ee6c020 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+ pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
static inline void *kmap(struct page *page)
{
@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
kunmap_high(page);
}
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- debug_kmap_atomic(type);
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
}
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- return;
- }
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
-#endif
- pagefault_enable();
-}
-
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
+
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
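
The inline kmap_atomic_prot() and kunmap_atomic() bodies are replaced by extern declarations, presumably so the implementation can move out of line into a C file; the caller-facing API does not change. A hypothetical atomic-context user (example_clear_highpage() is a made-up name):

/* hypothetical caller: kmap_atomic() is still usable from IRQ/atomic
 * context, it simply ends up in the out-of-line kmap_atomic_prot() now */
static void example_clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);
}
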
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8ed69b3..8b505eaaa38a 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) mtmsr(flags)
+#define raw_local_irq_restore(flags) mtmsr(flags)
#endif
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
#endif
}
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
#endif
}
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#endif
}
-#define local_save_flags(flags) ((flags) = mfmsr())
-#define local_irq_save(flags) local_irq_save_ptr(&flags)
-#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags) ((flags) = mfmsr())
+#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
-#define hard_irq_enable() local_irq_enable()
-#define hard_irq_disable() local_irq_disable()
+#define hard_irq_disable() raw_local_irq_disable()
static inline int irqs_disabled_flags(unsigned long flags)
{
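
The low-level primitives gain a raw_ prefix, which is the naming the generic <linux/irqflags.h> layer expects; that layer can then provide local_irq_save() and friends on top of them and insert hardirq state tracing when CONFIG_TRACE_IRQFLAGS is enabled. Existing callers are unaffected, as in this hypothetical critical section (example_update_counter() is a made-up name):

/* hypothetical critical section: the generic local_irq_save()/restore()
 * wrappers now expand to the raw_ helpers defined above */
static void example_update_counter(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* raw_local_irq_save() underneath */
	(*counter)++;
	local_irq_restore(flags);	/* raw_local_irq_restore() underneath */
}
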
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index 8ccd4e155768..0ea0639fcf75 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+#define PERF_COUNTER_INDEX_OFFSET 1
+
/*
* Only override the default definitions in include/linux/perf_counter.h
* if we have hardware PMU support.
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372f..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
+ (e), ((e) & _PAGE_COMBO) ? \
+ (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r) ((r).pte)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c12339b304..168fce726201 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
(devfn << 8) | (reg & 0xff);
}
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
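
The RTAS lock switches to the low-level raw spinlock type, and give/take timebase helpers are exported for SMP bring-up. A hypothetical sketch of how a platform might wire the new helpers into its smp_ops hooks (the message_pass/probe/kick_cpu/setup_cpu entries are assumptions, not part of this commit): the boot CPU calls rtas_give_timebase() while the secondary being started calls rtas_take_timebase() to synchronise its timebase.

/* hypothetical platform smp_ops using the new RTAS timebase-sync helpers */
static struct smp_ops_t example_smp_ops = {
	.message_pass	= smp_mpic_message_pass,	/* assumed MPIC hook */
	.probe		= smp_mpic_probe,		/* assumed MPIC hook */
	.kick_cpu	= example_kick_cpu,		/* made-up platform hook */
	.setup_cpu	= example_setup_cpu,		/* made-up platform hook */
	.give_timebase	= rtas_give_timebase,
	.take_timebase	= rtas_take_timebase,
};
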