From 9d40ee200a527ce08ab8c793ba8ae3e242edbb0e Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 7 Oct 2009 10:54:19 -0700 Subject: [IA64] Squeeze ticket locks back into 4 bytes. Linus pointed out that other people have spent large amounts of time and effort to optimize the layout of frequently used structures. Often these have embedded locks, and the assumption is that a lock takes 4 bytes. Linus also pointed out how to work with the limited options for atomic instructions on Itanium. Signed-off-by: Tony Luck --- arch/ia64/include/asm/spinlock.h | 45 ++++++++++++++++++++-------------- arch/ia64/include/asm/spinlock_types.h | 2 +- 2 files changed, 27 insertions(+), 20 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 30bb930e1111..4fa502739d64 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -25,61 +25,68 @@ * by atomically noting the tail and incrementing it by one (thus adding * ourself to the queue and noting our position), then waiting until the head * becomes equal to the the initial value of the tail. + * The pad bits in the middle are used to prevent the next_ticket number + * overflowing into the now_serving number. * - * 63 32 31 0 + * 31 17 16 15 14 0 * +----------------------------------------------------+ - * | next_ticket_number | now_serving | + * | now_serving | padding | next_ticket | * +----------------------------------------------------+ */ -#define TICKET_SHIFT 32 +#define TICKET_SHIFT 17 +#define TICKET_BITS 15 +#define TICKET_MASK ((1 << TICKET_BITS) - 1) static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) { - int *p = (int *)&lock->lock, turn, now_serving; + int *p = (int *)&lock->lock, ticket, serve; - now_serving = *p; - turn = ia64_fetchadd(1, p+1, acq); + ticket = ia64_fetchadd(1, p, acq); - if (turn == now_serving) + if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) return; - do { + ia64_invala(); + + for (;;) { + asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory"); + + if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) + return; cpu_relax(); - } while (ACCESS_ONCE(*p) != turn); + } } static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) { - long tmp = ACCESS_ONCE(lock->lock), try; + int tmp = ACCESS_ONCE(lock->lock); - if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) { - try = tmp + (1L << TICKET_SHIFT); - - return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp; - } + if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) + return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; return 0; } static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) { - int *p = (int *)&lock->lock; + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; - (void)ia64_fetchadd(1, p, rel); + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); + ACCESS_ONCE(*p) = (tmp + 2) & ~1; } static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); - return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1)); + return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); - return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1; + return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } static inline int __raw_spin_is_locked(raw_spinlock_t *lock) diff --git 
a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index b61d136d9bc2..474e46f1ab4a 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -6,7 +6,7 @@ #endif typedef struct { - volatile unsigned long lock; + volatile unsigned int lock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } -- cgit v1.2.3 From 883a3acf5b0d4782ac35981227a0d094e8b44850 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 9 Oct 2009 10:52:39 -0700 Subject: [IA64] Re-implement spinaphores using ticket lock concepts Bound the wait time for the ptcg_sem by using a similar idea to the ticket spin locks. In this case we have only one instance of a spinaphore, so make it 8 bytes rather than try to squeeze it into 4 bytes to keep the code simpler (and shorter). Signed-off-by: Tony Luck --- arch/ia64/mm/tlb.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index f426dc78d959..ee09d261f2e6 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm) * this primitive it can be moved up to a spinaphore.h header. */ struct spinaphore { - atomic_t cur; + unsigned long ticket; + unsigned long serve; }; static inline void spinaphore_init(struct spinaphore *ss, int val) { - atomic_set(&ss->cur, val); + ss->ticket = 0; + ss->serve = val; } static inline void down_spin(struct spinaphore *ss) { - while (unlikely(!atomic_add_unless(&ss->cur, -1, 0))) - while (atomic_read(&ss->cur) == 0) - cpu_relax(); + unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve; + + if (time_before(t, ss->serve)) + return; + + ia64_invala(); + + for (;;) { + asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); + if (time_before(t, serve)) + return; + cpu_relax(); + } } static inline void up_spin(struct spinaphore *ss) { - atomic_add(1, &ss->cur); + ia64_fetchadd(1, &ss->serve, rel); } static struct spinaphore ptcg_sem; -- cgit v1.2.3 From 3b885787ea4112eaa80945999ea0901bf742707f Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 12 Oct 2009 13:26:31 -0700 Subject: net: Generalize socket rx gap / receive queue overflow cmsg Create a new socket level option to report number of queue overflows Recently I augmented the AF_PACKET protocol to report the number of frames lost on the socket receive queue between any two enqueued frames. This value was exported via a SOL_PACKET level cmsg. After I completed that work it was requested that this feature be generalized so that any datagram oriented socket could make use of this option. As such I've created this patch. It creates a new SOL_SOCKET level option called SO_RXQ_OVFL, which when enabled exports a SOL_SOCKET level cmsg that reports the number of times the sk_receive_queue overflowed between any two given frames. It also augments the AF_PACKET protocol to take advantage of this new feature (as it previously did not touch sk->sk_drops, which this patch uses to record the overflow count). Tested successfully by me. Notes: 1) Unlike my previous patch, this patch simply records the sk_drops value, which is not a number of drops between packets, but rather a total number of drops. Deltas must be computed in user space. 2) While this patch currently works with datagram oriented protocols, it will also be accepted by non-datagram oriented protocols.
I'm not sure if thats agreeable to everyone, but my argument in favor of doing so is that, for those protocols which aren't applicable to this option, sk_drops will always be zero, and reporting no drops on a receive queue that isn't used for those non-participating protocols seems reasonable to me. This also saves us having to code in a per-protocol opt in mechanism. 3) This applies cleanly to net-next assuming that commit 977750076d98c7ff6cbda51858bb5a5894a9d9ab (my af packet cmsg patch) is reverted Signed-off-by: Neil Horman Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- arch/ia64/include/asm/socket.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 0b0d5ff062e5..51427eaa51ba 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h @@ -69,4 +69,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_IA64_SOCKET_H */ -- cgit v1.2.3 From a2e2725541fad72416326798c2d7fa4dafb7d337 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 12 Oct 2009 23:40:10 -0700 Subject: net: Introduce recvmmsg socket syscall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Meaning receive multiple messages, reducing the number of syscalls and net stack entry/exit operations. Next patches will introduce mechanisms where protocols that want to optimize this operation will provide an unlocked_recvmsg operation. This takes into account comments made by: . Paul Moore: sock_recvmsg is called only for the first datagram, sock_recvmsg_nosec is used for the rest. . Caitlin Bestler: recvmmsg now has a struct timespec timeout, that works in the same fashion as the ppoll one. If the underlying protocol returns a datagram with MSG_OOB set, this will make recvmmsg return right away with as many datagrams (+ the OOB one) it has received so far. . Rémi Denis-Courmont & Steven Whitehouse: If we receive N < vlen datagrams and then recvmsg returns an error, recvmmsg will return the successfully received datagrams, store the error and return it in the next call. This paves the way for a subsequent optimization, sk_prot->unlocked_recvmsg, where we will be able to acquire the lock only at batch start and end, not at every underlying recvmsg call. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: David S. Miller --- arch/ia64/kernel/entry.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index d0e7d37017b4..d75b872ca4dc 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1806,6 +1806,7 @@ sys_call_table: data8 sys_preadv data8 sys_pwritev // 1320 data8 sys_rt_tgsigqueueinfo + data8 sys_recvmmsg .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ -- cgit v1.2.3 From 29e4e025be52c0619b9dfe6faba29bc3deac6272 Mon Sep 17 00:00:00 2001 From: Takao Indoh Date: Thu, 1 Oct 2009 17:55:16 -0400 Subject: [IA64] Restore registers in the stack on INIT Registers are not saved anywhere when INIT comes during fsys mode and we cannot know what happened when we investigate vmcore captured by kdump. This patch adds new function finish_pt_regs() so registers can be saved in such a case. 
Signed-off-by: Takao Indoh Signed-off-by: Tony Luck --- arch/ia64/kernel/mca.c | 104 +++++++++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 47 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index d2877a7bfe2e..496ac7a99488 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -887,6 +887,60 @@ ia64_mca_modify_comm(const struct task_struct *previous_current) memcpy(current->comm, comm, sizeof(current->comm)); } +static void +finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, + unsigned long *nat) +{ + const u64 *bank; + + /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use + * pmsa_{xip,xpsr,xfs} + */ + if (ia64_psr(regs)->ic) { + regs->cr_iip = ms->pmsa_iip; + regs->cr_ipsr = ms->pmsa_ipsr; + regs->cr_ifs = ms->pmsa_ifs; + } else { + regs->cr_iip = ms->pmsa_xip; + regs->cr_ipsr = ms->pmsa_xpsr; + regs->cr_ifs = ms->pmsa_xfs; + } + regs->pr = ms->pmsa_pr; + regs->b0 = ms->pmsa_br0; + regs->ar_rsc = ms->pmsa_rsc; + copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, ®s->r1, nat); + copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, ®s->r2, nat); + copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, ®s->r3, nat); + copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, ®s->r8, nat); + copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, ®s->r9, nat); + copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, ®s->r10, nat); + copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, ®s->r11, nat); + copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, ®s->r12, nat); + copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, ®s->r13, nat); + copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, ®s->r14, nat); + copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, ®s->r15, nat); + if (ia64_psr(regs)->bn) + bank = ms->pmsa_bank1_gr; + else + bank = ms->pmsa_bank0_gr; + copy_reg(&bank[16-16], ms->pmsa_nat_bits, ®s->r16, nat); + copy_reg(&bank[17-16], ms->pmsa_nat_bits, ®s->r17, nat); + copy_reg(&bank[18-16], ms->pmsa_nat_bits, ®s->r18, nat); + copy_reg(&bank[19-16], ms->pmsa_nat_bits, ®s->r19, nat); + copy_reg(&bank[20-16], ms->pmsa_nat_bits, ®s->r20, nat); + copy_reg(&bank[21-16], ms->pmsa_nat_bits, ®s->r21, nat); + copy_reg(&bank[22-16], ms->pmsa_nat_bits, ®s->r22, nat); + copy_reg(&bank[23-16], ms->pmsa_nat_bits, ®s->r23, nat); + copy_reg(&bank[24-16], ms->pmsa_nat_bits, ®s->r24, nat); + copy_reg(&bank[25-16], ms->pmsa_nat_bits, ®s->r25, nat); + copy_reg(&bank[26-16], ms->pmsa_nat_bits, ®s->r26, nat); + copy_reg(&bank[27-16], ms->pmsa_nat_bits, ®s->r27, nat); + copy_reg(&bank[28-16], ms->pmsa_nat_bits, ®s->r28, nat); + copy_reg(&bank[29-16], ms->pmsa_nat_bits, ®s->r29, nat); + copy_reg(&bank[30-16], ms->pmsa_nat_bits, ®s->r30, nat); + copy_reg(&bank[31-16], ms->pmsa_nat_bits, ®s->r31, nat); +} + /* On entry to this routine, we are running on the per cpu stack, see * mca_asm.h. The original stack has not been touched by this event. Some of * the original stack's registers will be in the RBS on this stack. 
This stack @@ -921,7 +975,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; u64 ar_bspstore = regs->ar_bspstore; u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); - const u64 *bank; const char *msg; int cpu = smp_processor_id(); @@ -1024,54 +1077,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, p = (char *)r12 - sizeof(*regs); old_regs = (struct pt_regs *)p; memcpy(old_regs, regs, sizeof(*regs)); - /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use - * pmsa_{xip,xpsr,xfs} - */ - if (ia64_psr(regs)->ic) { - old_regs->cr_iip = ms->pmsa_iip; - old_regs->cr_ipsr = ms->pmsa_ipsr; - old_regs->cr_ifs = ms->pmsa_ifs; - } else { - old_regs->cr_iip = ms->pmsa_xip; - old_regs->cr_ipsr = ms->pmsa_xpsr; - old_regs->cr_ifs = ms->pmsa_xfs; - } - old_regs->pr = ms->pmsa_pr; - old_regs->b0 = ms->pmsa_br0; old_regs->loadrs = loadrs; - old_regs->ar_rsc = ms->pmsa_rsc; old_unat = old_regs->ar_unat; - copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); - copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); - copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); - copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); - copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); - copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); - copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); - copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); - copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); - copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); - copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); - if (ia64_psr(old_regs)->bn) - bank = ms->pmsa_bank1_gr; - else - bank = ms->pmsa_bank0_gr; - copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); - copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); - copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); - copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); - copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); - copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); - copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); - copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); - copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); - copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); - copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); - copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); - copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); - copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); - copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); - copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); + finish_pt_regs(old_regs, ms, &old_unat); /* Next stack a struct switch_stack. 
mca_asm.S built a partial * switch_stack, copy it and fill in the blanks using pt_regs and @@ -1141,6 +1149,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, no_mod: mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", smp_processor_id(), type, msg); + old_unat = regs->ar_unat; + finish_pt_regs(regs, ms, &old_unat); return previous_current; } -- cgit v1.2.3 From dec1798f81f7e8a299734bdc29197ae77bf08018 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Sat, 10 Oct 2009 22:21:59 +0200 Subject: [IA64] unsigned cannot be less than 0 in sn_hwperf_ioctl() struct sn_hwperf_ioctl_args member arg (u64) cannot be less than 0. Signed-off-by: Roel Kluin Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 4c7e74790958..55ac3c4e11d2 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c @@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) break; case SN_HWPERF_GET_OBJ_NODE: - if (a.sz != sizeof(u64) || a.arg < 0) { + i = a.arg; + if (a.sz != sizeof(u64) || i < 0) { r = -EINVAL; goto error; } if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { - if (a.arg >= nobj) { + if (i >= nobj) { r = -EINVAL; vfree(objs); goto error; } - if (objs[(i = a.arg)].id != a.arg) { + if (objs[i].id != a.arg) { for (i = 0; i < nobj; i++) { if (objs[i].id == a.arg) break; -- cgit v1.2.3 From adcd740341dbd58eb94a8c2885c171ce9eb8677c Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 12 Oct 2009 08:24:30 -0600 Subject: [IA64] Require SAL 3.2 in order to do extended config space ops We had assumed that SAL firmware would return an error if it didn't understand extended config space. Unfortunately, the SAL on the SGI 750 doesn't do that, it panics the machine. So, condition the extended PCI config space accesses on SAL revision 3.2. Signed-off-by: Matthew Wilcox Tested-by: Brad Spengler Signed-off-by: Tony Luck --- arch/ia64/pci/pci.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 7de76dd352fe..c0fca2c1c858 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn, if ((seg | reg) <= 255) { addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); mode = 0; - } else { + } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); mode = 1; + } else { + return -EINVAL; } + result = ia64_sal_pci_config_read(addr, mode, len, &data); if (result != 0) return -EINVAL; @@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn, if ((seg | reg) <= 255) { addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); mode = 0; - } else { + } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); mode = 1; + } else { + return -EINVAL; } result = ia64_sal_pci_config_write(addr, mode, len, value); if (result != 0) -- cgit v1.2.3 From 54f8dd3c99052456a65bd26aa3313f57cd145a73 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 18 Sep 2009 12:48:12 -0700 Subject: [IA64] use printk_once() unaligned.c/io_common.c Use printk_once() in a couple of places. 
Signed-off-by: Marcin Slusarz Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/kernel/unaligned.c | 6 ++---- arch/ia64/sn/kernel/io_common.c | 8 ++------ 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 6db08599ebbc..776dd40397e2 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len) */ int no_unaligned_warning; int unaligned_dump_stack; -static int noprint_warning; /* * For M-unit: @@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) /* watch for command names containing %s */ printk(KERN_WARNING "%s", buf); } else { - if (no_unaligned_warning && !noprint_warning) { - noprint_warning = 1; - printk(KERN_WARNING "%s(%d) encountered an " + if (no_unaligned_warning) { + printk_once(KERN_WARNING "%s(%d) encountered an " "unaligned exception which required\n" "kernel assistance, which degrades " "the performance of the application.\n" diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 25831c47c579..308e6595110e 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev) * Additionally note that the struct sn_flush_device_war also has to be * removed from arch/ia64/sn/include/xtalk/hubdev.h */ -static u8 war_implemented = 0; static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, struct sn_flush_device_common *common) @@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, struct sn_flush_device_war *dev_entry; struct ia64_sal_retval isrv = {0,0,0,0}; - if (!war_implemented) { - printk(KERN_WARNING "PROM version < 4.50 -- implementing old " - "PROM flush WAR\n"); - war_implemented = 1; - } + printk_once(KERN_WARNING + "PROM version < 4.50 -- implementing old PROM flush WAR\n"); war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); BUG_ON(!war_list); -- cgit v1.2.3 From 1502f08edc040b6ba4b986454416564088995e79 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 12 Oct 2009 09:51:41 -0700 Subject: [IA64] SMT friendly version of spin_unlock_wait() We can be kinder to SMT systems in spin_unlock_wait. 
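For reference, spin_unlock_wait() only has to watch for the lock becoming free; it never takes the lock itself. A rough portable sketch of that check is below (illustrative only: it reuses the TICKET_SHIFT/TICKET_MASK encoding introduced earlier in this file, whereas the actual patch that follows spins on an ALAT-checked load, ld4.c.nc, so an SMT sibling thread is disturbed less):

	/* Sketch only, not the ia64 implementation: spin until now_serving
	 * catches up with next_ticket, i.e. the lock is observed unlocked. */
	static inline void ticket_spin_unlock_wait_sketch(raw_spinlock_t *lock)
	{
		int word;

		for (;;) {
			word = ACCESS_ONCE(lock->lock);
			if (!(((word >> TICKET_SHIFT) ^ word) & TICKET_MASK))
				return;	/* head == tail: lock is free */
			cpu_relax();
		}
	}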
Signed-off-by: Tony Luck --- arch/ia64/include/asm/spinlock.h | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 4fa502739d64..239ecdc9516d 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } +static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +{ + int *p = (int *)&lock->lock, ticket; + + ia64_invala(); + + for (;;) { + asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory"); + if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) + return; + cpu_relax(); + } +} + static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); @@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) - cpu_relax(); + __ticket_spin_unlock_wait(lock); } #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -- cgit v1.2.3 From 67ca0e5fa884f483d655ef19f22ad8509138dd3e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 14 Oct 2009 13:11:30 -0700 Subject: ia64: Fix up the syscall table for recvmmsg Reported-by: "Tony Luck" Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Tony Luck Signed-off-by: David S. Miller --- arch/ia64/include/asm/unistd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 5a5347f5c4e4..9c72e36c5281 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -311,11 +311,12 @@ #define __NR_preadv 1319 #define __NR_pwritev 1320 #define __NR_rt_tgsigqueueinfo 1321 +#define __NR_rt_recvmmsg 1322 #ifdef __KERNEL__ -#define NR_syscalls 298 /* length of syscall table */ +#define NR_syscalls 299 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about -- cgit v1.2.3 From b94b08081fcecf83fa690d6c5664f6316fe72208 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 14 Oct 2009 15:10:03 -0700 Subject: [IA64] fix percpu warnings Fix percpu types warning in ia64/sn: arch/ia64/sn/kernel/setup.c:74: error: conflicting types for '__pcpu_scope___sn_cnodeid_to_nasid' arch/ia64/include/asm/sn/arch.h:74: error: previous declaration of '__pcpu_scope___sn_cnodeid_to_nasid' was here Signed-off-by: Randy Dunlap Cc: Jes Sorensen Acked-by: Tejun Heo Signed-off-by: Tony Luck --- arch/ia64/include/asm/sn/arch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h index 7caa1f44cd95..f5f493b0c077 100644 --- a/arch/ia64/include/asm/sn/arch.h +++ b/arch/ia64/include/asm/sn/arch.h @@ -71,7 +71,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); * Compact node ID to nasid mappings kept in the per-cpu data areas of each * cpu. 
*/ -DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +DECLARE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) -- cgit v1.2.3 From e8c93fc7b7221b6ac7e7ddbd0e21e205bf9e801a Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 2 Nov 2009 09:23:08 -0800 Subject: Revert "[IA64] fix percpu warnings" This reverts commit b94b08081fcecf83fa690d6c5664f6316fe72208. genksyms currently cannot handle complicated types for exported percpu variables. Drop this patch for now as it prevents a module from being loaded on sn2 systems: xpc: no symbol version for per_cpu____sn_cnodeid_to_nasid xpc: Unknown symbol per_cpu____sn_cnodeid_to_nasid Signed-off-by: Tony Luck --- arch/ia64/include/asm/sn/arch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h index f5f493b0c077..7caa1f44cd95 100644 --- a/arch/ia64/include/asm/sn/arch.h +++ b/arch/ia64/include/asm/sn/arch.h @@ -71,7 +71,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); * Compact node ID to nasid mappings kept in the per-cpu data areas of each * cpu. */ -DECLARE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) -- cgit v1.2.3 From ac1aa47b131416a6ff37eb1005a0a1d2541aad6c Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Mon, 26 Oct 2009 13:20:44 -0700 Subject: PCI: determine CLS more intelligently Till now, CLS has been determined either by arch code or as L1_CACHE_BYTES. Only x86 and ia64 set CLS explicitly and x86 doesn't always get it right. On most configurations, the chance is that firmware configures the correct value during boot. This patch makes pci_init() determine CLS by looking at what firmware has configured. It scans all devices and if all non-zero values agree, the value is used. If none is configured or there is a disagreement, pci_dfl_cache_line_size is used. arch can set the dfl value (via PCI_CACHE_LINE_BYTES or pci_dfl_cache_line_size) or override the actual one. ia64, x86 and sparc64 updated to set the default cls instead of the actual one. While at it, declare pci_cache_line_size and pci_dfl_cache_line_size in pci.h and drop private declarations from arch code. Signed-off-by: Tejun Heo Acked-by: David Miller Acked-by: Greg KH Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Tony Luck Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index c0fca2c1c858..d60e7195b7dd 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -720,9 +720,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) return ret; } -/* It's defined in drivers/pci/pci.c */ -extern u8 pci_cache_line_size; - /** * set_pci_cacheline_size - determine cacheline size for PCI devices * @@ -731,7 +728,7 @@ extern u8 pci_cache_line_size; * * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). 
*/ -static void __init set_pci_cacheline_size(void) +static void __init set_pci_dfl_cacheline_size(void) { unsigned long levels, unique_caches; long status; @@ -751,7 +748,7 @@ static void __init set_pci_cacheline_size(void) "(status=%ld)\n", __func__, status); return; } - pci_cache_line_size = (1 << cci.pcci_line_size) / 4; + pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; } u64 ia64_dma_get_required_mask(struct device *dev) @@ -782,7 +779,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask); static int __init pcibios_init(void) { - set_pci_cacheline_size(); + set_pci_dfl_cacheline_size(); return 0; } -- cgit v1.2.3 From 637b363e86f71effe74cba5d509f18bd3199a65b Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 6 Oct 2009 15:33:54 -0600 Subject: ia64/PCI: print resources consistently with %pRt This uses %pRt to print additional resource information (type, size, prefetchability, etc.) consistently. Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d60e7195b7dd..06413b827e97 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -131,6 +131,7 @@ alloc_pci_controller (int seg) } struct pci_root_info { + struct acpi_device *bridge; struct pci_controller *controller; char *name; }; @@ -297,9 +298,19 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) window->offset = offset; if (insert_resource(root, &window->resource)) { - printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n", - window->resource.start, window->resource.end, - root->name, info->name); + dev_err(&info->bridge->dev, "can't allocate %pRt\n", + &window->resource); + } else { + if (offset) + dev_info(&info->bridge->dev, "host bridge window: %pRt " + "(PCI address [%#llx-%#llx])\n", + &window->resource, + window->resource.start - offset, + window->resource.end - offset); + else + dev_info(&info->bridge->dev, + "host bridge window: %pRt\n", + &window->resource); } return AE_OK; @@ -319,8 +330,7 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) (res->end - res->start < 16)) continue; if (j >= PCI_BUS_NUM_RESOURCES) { - printk("Ignoring range [%#llx-%#llx] (%lx)\n", - res->start, res->end, res->flags); + dev_warn(&bus->dev, "ignoring %pRf (no space)\n", res); continue; } bus->resource[j++] = res; @@ -364,6 +374,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) goto out3; sprintf(name, "PCI Bus %04x:%02x", domain, bus); + info.bridge = device; info.controller = controller; info.name = name; acpi_walk_resources(device->handle, METHOD_NAME__CRS, -- cgit v1.2.3 From c7dabef8a2c59e6a3de9d66fc35fb6a43ef7172d Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 27 Oct 2009 13:26:47 -0600 Subject: vsprintf: use %pR, %pr instead of %pRt, %pRf Jesse accidentally applied v1 [1] of the patchset instead of v2 [2]. This is the diff between v1 and v2. 
The changes in this patch are: - tidied vsprintf stack buffer to shrink and compute size more accurately - use %pR for decoding and %pr for "raw" (with type and flags) instead of adding %pRt and %pRf [1] http://lkml.org/lkml/2009/10/6/491 [2] http://lkml.org/lkml/2009/10/13/441 Signed-off-by: Bjorn Helgaas Signed-off-by: Jesse Barnes --- arch/ia64/pci/pci.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 06413b827e97..df639db779f9 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -298,18 +298,19 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data) window->offset = offset; if (insert_resource(root, &window->resource)) { - dev_err(&info->bridge->dev, "can't allocate %pRt\n", + dev_err(&info->bridge->dev, + "can't allocate host bridge window %pR\n", &window->resource); } else { if (offset) - dev_info(&info->bridge->dev, "host bridge window: %pRt " + dev_info(&info->bridge->dev, "host bridge window %pR " "(PCI address [%#llx-%#llx])\n", &window->resource, window->resource.start - offset, window->resource.end - offset); else dev_info(&info->bridge->dev, - "host bridge window: %pRt\n", + "host bridge window %pR\n", &window->resource); } @@ -330,7 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) (res->end - res->start < 16)) continue; if (j >= PCI_BUS_NUM_RESOURCES) { - dev_warn(&bus->dev, "ignoring %pRf (no space)\n", res); + dev_warn(&bus->dev, + "ignoring host bridge window %pR (no space)\n", + res); continue; } bus->resource[j++] = res; -- cgit v1.2.3 From 6f589526df36dc6ac60df48e7ca23f536e71d332 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Apr 2009 00:44:03 -0700 Subject: sysctl: ia64 Use the compat_sys_sysctl Now that we have a generic 32bit compatibility implementation there is no need for ia64 to implement it's own. Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Eric W. 
Biederman --- arch/ia64/ia32/ia32_entry.S | 2 +- arch/ia64/ia32/sys_ia32.c | 55 --------------------------------------------- 2 files changed, 1 insertion(+), 56 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index af9405cd70e5..10c37510f4b4 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -327,7 +327,7 @@ ia32_syscall_table: data8 compat_sys_writev data8 sys_getsid data8 sys_fdatasync - data8 sys32_sysctl + data8 compat_sys_sysctl data8 sys_mlock /* 150 */ data8 sys_munlock data8 sys_mlockall diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 625ed8f76fce..429ec968c9ee 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1628,61 +1628,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags) return sys_msync(addr, len + (start - addr), flags); } -struct sysctl32 { - unsigned int name; - int nlen; - unsigned int oldval; - unsigned int oldlenp; - unsigned int newval; - unsigned int newlen; - unsigned int __unused[4]; -}; - -#ifdef CONFIG_SYSCTL_SYSCALL -asmlinkage long -sys32_sysctl (struct sysctl32 __user *args) -{ - struct sysctl32 a32; - mm_segment_t old_fs = get_fs (); - void __user *oldvalp, *newvalp; - size_t oldlen; - int __user *namep; - long ret; - - if (copy_from_user(&a32, args, sizeof(a32))) - return -EFAULT; - - /* - * We need to pre-validate these because we have to disable address checking - * before calling do_sysctl() because of OLDLEN but we can't run the risk of the - * user specifying bad addresses here. Well, since we're dealing with 32 bit - * addresses, we KNOW that access_ok() will always succeed, so this is an - * expensive NOP, but so what... - */ - namep = (int __user *) compat_ptr(a32.name); - oldvalp = compat_ptr(a32.oldval); - newvalp = compat_ptr(a32.newval); - - if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) - || !access_ok(VERIFY_WRITE, namep, 0) - || !access_ok(VERIFY_WRITE, oldvalp, 0) - || !access_ok(VERIFY_WRITE, newvalp, 0)) - return -EFAULT; - - set_fs(KERNEL_DS); - lock_kernel(); - ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, - newvalp, (size_t) a32.newlen); - unlock_kernel(); - set_fs(old_fs); - - if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) - return -EFAULT; - - return ret; -} -#endif - asmlinkage long sys32_newuname (struct new_utsname __user *name) { -- cgit v1.2.3 From 5b5d94487d934be6b0aa966c9acbdf15b07ef627 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 6 Nov 2009 11:11:43 +0900 Subject: ia64/xen: compilation fix This patch fixes the following compilation error introduced by a PCI related features. The change set of 5dd1af9f84c79bedd589db89e71ca733f3bf0ebd moves some xen related definitions from the arch header file (x86/include/asm/xen/hypervisor.h) to the common header file (include/xen/xen.h). So ia64/xen also follows it. 
In file included from linux-next/include/xen/grant_table.h:41, from linux-next/drivers/block/xen-blkfront.c:48: linux-next/arch/ia64/include/asm/xen/hypervisor.h:43: error: nested redefinition of 'enum xen_domain_type' linux-next/arch/ia64/include/asm/xen/hypervisor.h:43: error: redeclaration of 'enum xen_domain_type' linux-next/arch/ia64/include/asm/xen/hypervisor.h:44: error: redeclaration of enumerator 'XEN_NATIVE' linux-next/include/xen/xen.h:5: error: previous definition of 'XEN_NATIVE' was here linux-next/arch/ia64/include/asm/xen/hypervisor.h:45: error: redeclaration of enumerator 'XEN_PV_DOMAIN' linux-next/include/xen/xen.h:6: error: previous definition of 'XEN_PV_DOMAIN' was here linux-next/arch/ia64/include/asm/xen/hypervisor.h:46: error: redeclaration of enumerator 'XEN_HVM_DOMAIN' linux-next/include/xen/xen.h:7: error: previous definition of 'XEN_HVM_DOMAIN' was here Signed-off-by: Isaku Yamahata Signed-off-by: Jesse Barnes --- arch/ia64/include/asm/xen/hypervisor.h | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 88afb54501e4..67455c2ed2b1 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h @@ -37,35 +37,9 @@ #include #include /* to compile feature.c */ #include /* to comiple xen-netfront.c */ +#include #include -/* xen_domain_type is set before executing any C code by early_xen_setup */ -enum xen_domain_type { - XEN_NATIVE, /* running on bare hardware */ - XEN_PV_DOMAIN, /* running in a PV domain */ - XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ -}; - -#ifdef CONFIG_XEN -extern enum xen_domain_type xen_domain_type; -#else -#define xen_domain_type XEN_NATIVE -#endif - -#define xen_domain() (xen_domain_type != XEN_NATIVE) -#define xen_pv_domain() (xen_domain() && \ - xen_domain_type == XEN_PV_DOMAIN) -#define xen_hvm_domain() (xen_domain() && \ - xen_domain_type == XEN_HVM_DOMAIN) - -#ifdef CONFIG_XEN_DOM0 -#define xen_initial_domain() (xen_pv_domain() && \ - (xen_start_info->flags & SIF_INITDOMAIN)) -#else -#define xen_initial_domain() (0) -#endif - - #ifdef CONFIG_XEN extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -- cgit v1.2.3 From ad32e8cb86e7894aac51c8963eaa9f36bb8a4e14 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 10 Nov 2009 19:46:19 +0900 Subject: swiotlb: Defer swiotlb init printing, export swiotlb_print_info() This enables us to avoid printing swiotlb memory info when we initialize swiotlb. After swiotlb initialization, we could find that we don't need swiotlb. This patch removes the code to print swiotlb memory info in swiotlb_init() and exports the function to do that. 
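The resulting call pattern looks roughly like the sketch below. This is illustrative only: the verbose flag taken by swiotlb_init() and a swiotlb_print_info() that needs no argument are assumptions based on the description above, and arch_wants_swiotlb() is a made-up placeholder for whatever test the architecture applies before deciding to keep swiotlb.

	swiotlb_init(0);		/* set up the bounce buffers quietly */
	/* ... later, once we know swiotlb will really be used ... */
	if (arch_wants_swiotlb())	/* hypothetical arch-specific check */
		swiotlb_print_info();	/* print the memory info only now */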
Signed-off-by: FUJITA Tomonori Cc: chrisw@sous-sol.org Cc: dwmw2@infradead.org Cc: joerg.roedel@amd.com Cc: muli@il.ibm.com Cc: tony.luck@intel.com Cc: benh@kernel.crashing.org LKML-Reference: <1257849980-22640-9-git-send-email-fujita.tomonori@lab.ntt.co.jp> [ -v2: merge up conflict ] Signed-off-by: Ingo Molnar --- arch/ia64/kernel/pci-swiotlb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 285aae8431c6..53292abf846c 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = { void __init swiotlb_dma_init(void) { dma_ops = &swiotlb_dma_ops; - swiotlb_init(); + swiotlb_init(1); } void __init pci_swiotlb_init(void) @@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void) swiotlb = 1; printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); machvec_init("dig"); - swiotlb_init(); + swiotlb_init(1); dma_ops = &swiotlb_dma_ops; #else panic("Unable to find Intel IOMMU"); -- cgit v1.2.3 From d00faf81afa288a8f8447f00a38405873c550092 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 3 Apr 2009 05:15:37 -0700 Subject: sysctl ia64: Remove dead binary sysctl support Now that sys_sysctl is a generic wrapper around /proc/sys .ctl_name and .strategy members of sysctl tables are dead code. Remove them. Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Eric W. Biederman --- arch/ia64/kernel/crash.c | 7 ++----- arch/ia64/kernel/perfmon.c | 6 ------ 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 6631a9dfafdc..7c7d6a6dc0f7 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -239,7 +239,6 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) #ifdef CONFIG_SYSCTL static ctl_table kdump_ctl_table[] = { { - .ctl_name = CTL_UNNUMBERED, .procname = "kdump_on_init", .data = &kdump_on_init, .maxlen = sizeof(int), @@ -247,24 +246,22 @@ static ctl_table kdump_ctl_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = CTL_UNNUMBERED, .procname = "kdump_on_fatal_mca", .data = &kdump_on_fatal_mca, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &proc_dointvec, }, - { .ctl_name = 0 } + { } }; static ctl_table sys_table[] = { { - .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, .child = kdump_ctl_table, }, - { .ctl_name = 0 } + { } }; #endif diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1782705b1f7..ca30b3646405 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -522,7 +522,6 @@ EXPORT_SYMBOL(pfm_sysctl); static ctl_table pfm_ctl_table[]={ { - .ctl_name = CTL_UNNUMBERED, .procname = "debug", .data = &pfm_sysctl.debug, .maxlen = sizeof(int), @@ -530,7 +529,6 @@ static ctl_table pfm_ctl_table[]={ .proc_handler = &proc_dointvec, }, { - .ctl_name = CTL_UNNUMBERED, .procname = "debug_ovfl", .data = &pfm_sysctl.debug_ovfl, .maxlen = sizeof(int), @@ -538,7 +536,6 @@ static ctl_table pfm_ctl_table[]={ .proc_handler = &proc_dointvec, }, { - .ctl_name = CTL_UNNUMBERED, .procname = "fastctxsw", .data = &pfm_sysctl.fastctxsw, .maxlen = sizeof(int), @@ -546,7 +543,6 @@ static ctl_table pfm_ctl_table[]={ .proc_handler = &proc_dointvec, }, { - .ctl_name = CTL_UNNUMBERED, .procname = "expert_mode", .data = &pfm_sysctl.expert_mode, .maxlen = sizeof(int), @@ -557,7 +553,6 @@ static ctl_table pfm_ctl_table[]={ }; static 
ctl_table pfm_sysctl_dir[] = { { - .ctl_name = CTL_UNNUMBERED, .procname = "perfmon", .mode = 0555, .child = pfm_ctl_table, @@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = { }; static ctl_table pfm_sysctl_root[] = { { - .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, .child = pfm_sysctl_dir, -- cgit v1.2.3 From 6959450e567c1f17d3ce8489099fc56c3721d577 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Sat, 14 Nov 2009 20:46:38 +0900 Subject: swiotlb: Remove duplicate swiotlb_force extern declarations Signed-off-by: FUJITA Tomonori Cc: tony.luck@intel.com LKML-Reference: <1258199198-16657-4-git-send-email-fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/swiotlb.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h index dcbaea7ce128..f0acde68aaea 100644 --- a/arch/ia64/include/asm/swiotlb.h +++ b/arch/ia64/include/asm/swiotlb.h @@ -4,8 +4,6 @@ #include #include -extern int swiotlb_force; - #ifdef CONFIG_SWIOTLB extern int swiotlb; extern void pci_swiotlb_init(void); -- cgit v1.2.3 From 0696b711e4be45fa104c12329f617beb29c03f78 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Tue, 17 Nov 2009 13:49:50 +0800 Subject: timekeeping: Fix clock_gettime vsyscall time warp Since commit 0a544198 "timekeeping: Move NTP adjusted clock multiplier to struct timekeeper" the clock multiplier of vsyscall is updated with the unmodified clock multiplier of the clock source and not with the NTP adjusted multiplier of the timekeeper. This causes user space observerable time warps: new CLOCK-warp maximum: 120 nsecs, 00000025c337c537 -> 00000025c337c4bf Add a new argument "mult" to update_vsyscall() and hand in the timekeeping internal NTP adjusted multiplier. Signed-off-by: Lin Ming Cc: "Zhang Yanmin" Cc: Martin Schwidefsky Cc: Benjamin Herrenschmidt Cc: Tony Luck LKML-Reference: <1258436990.17765.83.camel@minggr.sh.intel.com> Signed-off-by: Thomas Gleixner --- arch/ia64/kernel/time.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 4990495d7531..a35c661e5e89 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -473,7 +473,7 @@ void update_vsyscall_tz(void) { } -void update_vsyscall(struct timespec *wall, struct clocksource *c) +void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult) { unsigned long flags; @@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c) /* copy fsyscall clock data */ fsyscall_gtod_data.clk_mask = c->mask; - fsyscall_gtod_data.clk_mult = c->mult; + fsyscall_gtod_data.clk_mult = mult; fsyscall_gtod_data.clk_shift = c->shift; fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; fsyscall_gtod_data.clk_cycle_last = c->cycle_last; -- cgit v1.2.3 From 6d4561110a3e9fa742aeec6717248a491dfb1878 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Mon, 16 Nov 2009 03:11:48 -0800 Subject: sysctl: Drop & in front of every proc_handler. For consistency drop & in front of every proc_handler. Explicity taking the address is unnecessary and it prevents optimizations like stubbing the proc_handlers to NULL. Cc: Alexey Dobriyan Cc: Ingo Molnar Cc: Joe Perches Signed-off-by: Eric W. 
Biederman --- arch/ia64/kernel/crash.c | 4 ++-- arch/ia64/kernel/perfmon.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 7c7d6a6dc0f7..b942f4032d7a 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -243,14 +243,14 @@ static ctl_table kdump_ctl_table[] = { .data = &kdump_on_init, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "kdump_on_fatal_mca", .data = &kdump_on_fatal_mca, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index ca30b3646405..402698b6689f 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -526,28 +526,28 @@ static ctl_table pfm_ctl_table[]={ .data = &pfm_sysctl.debug, .maxlen = sizeof(int), .mode = 0666, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "debug_ovfl", .data = &pfm_sysctl.debug_ovfl, .maxlen = sizeof(int), .mode = 0666, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "fastctxsw", .data = &pfm_sysctl.fastctxsw, .maxlen = sizeof(int), .mode = 0600, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "expert_mode", .data = &pfm_sysctl.expert_mode, .maxlen = sizeof(int), .mode = 0600, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; -- cgit v1.2.3 From 2263576cfc6e8f6ab038126c3254404b9fcb1c33 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 13 Nov 2009 10:06:08 +0800 Subject: ACPICA: Add post-order callback to acpi_walk_namespace The existing interface only has a pre-order callback. This change adds an additional parameter for a post-order callback which will be more useful for bus scans. ACPICA BZ 779. Also update the external calls to acpi_walk_namespace. http://www.acpica.org/bugzilla/show_bug.cgi?id=779 Signed-off-by: Lin Ming Signed-off-by: Bob Moore Signed-off-by: Len Brown --- arch/ia64/sn/kernel/io_acpi_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index fd50ff94302b..66f633bff059 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c @@ -390,7 +390,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, pcidev_match.handle = NULL; acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX, - find_matching_device, &pcidev_match, NULL); + find_matching_device, NULL, &pcidev_match, NULL); if (!pcidev_match.handle) { printk(KERN_ERR -- cgit v1.2.3 From 2d4dc890b5c8fabd818a8586607e6843c4375e62 Mon Sep 17 00:00:00 2001 From: Ilya Loginov Date: Thu, 26 Nov 2009 09:16:19 +0100 Subject: block: add helpers to run flush_dcache_page() against a bio and a request's pages Mtdblock driver doesn't call flush_dcache_page for pages in a request. So, this causes problems on architectures where the icache doesn't fill from the dcache or with dcache aliases. The patch fixes this. The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid pointless empty cache-thrashing loops on architectures for which flush_dcache_page() is a no-op. Every architecture was provided with these helpers: they flush the pages on architectures where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is equal to 1, and do nothing otherwise.
See "fix mtd_blkdevs problem with caches on some architectures" discussion on LKML for more information. Signed-off-by: Ilya Loginov Cc: Ingo Molnar Cc: David Woodhouse Cc: Peter Horton Cc: "Ed L. Cashin" Signed-off-by: Jens Axboe --- arch/ia64/include/asm/cacheflush.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h index c8ce2719fee8..429eefc93ee7 100644 --- a/arch/ia64/include/asm/cacheflush.h +++ b/arch/ia64/include/asm/cacheflush.h @@ -25,6 +25,7 @@ #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) \ do { \ clear_bit(PG_arch_1, &(page)->flags); \ -- cgit v1.2.3 From 3e71f88bc90792a187703860cf22fbed7c12cbd9 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:21 +0300 Subject: KVM: Maintain back mapping from irqchip/pin to gsi Maintain back mapping from irqchip/pin to gsi to speedup interrupt acknowledgment notifications. [avi: build fix on non-x86/ia64] Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index 18a7e49abbc5..bc90c75adf67 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -60,6 +60,7 @@ struct kvm_ioapic_state { #define KVM_IRQCHIP_PIC_MASTER 0 #define KVM_IRQCHIP_PIC_SLAVE 1 #define KVM_IRQCHIP_IOAPIC 2 +#define KVM_NR_IRQCHIPS 3 #define KVM_CONTEXT_SIZE 8*1024 -- cgit v1.2.3 From 136bdfeee7b5bc986fc94af3a40d7d13ea37bb95 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:23 +0300 Subject: KVM: Move irq ack notifier list to arch independent code Mask irq notifier list is already there. Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm_host.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index d9b6325a9328..a362e67e0ca6 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -475,7 +475,6 @@ struct kvm_arch { struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; int iommu_flags; - struct hlist_head irq_ack_notifier_list; unsigned long irq_sources_bitmap; unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; -- cgit v1.2.3 From eba0226bdfffe262e72b8360e4d0d12070e9a0f0 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:25 +0300 Subject: KVM: Move IO APIC to its own lock The allows removal of irq_lock from the injection path. 
Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 0ad09f05efa9..4a983147f6eb 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -851,8 +851,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: - memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), - sizeof(struct kvm_ioapic_state)); + r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; @@ -868,9 +867,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_IOAPIC: - memcpy(ioapic_irqchip(kvm), - &chip->chip.ioapic, - sizeof(struct kvm_ioapic_state)); + r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; -- cgit v1.2.3 From 680b3648ba89c44ac8d0316f78a0d6e147b88809 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:26 +0300 Subject: KVM: Drop kvm->irq_lock lock from irq injection path The only thing it protects now is interrupt injection into lapic and this can work lockless. Even now with kvm->irq_lock in place access to lapic is not entirely serialized since vcpu access doesn't take kvm->irq_lock. Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 4a983147f6eb..f534e0f6bb0d 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -982,10 +982,8 @@ long kvm_arch_vm_ioctl(struct file *filp, goto out; if (irqchip_in_kernel(kvm)) { __s32 status; - mutex_lock(&kvm->irq_lock); status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); - mutex_unlock(&kvm->irq_lock); if (ioctl == KVM_IRQ_LINE_STATUS) { irq_event.status = status; if (copy_to_user(argp, &irq_event, -- cgit v1.2.3 From 367e1319b229110a27c53221c2fa32a6aa86d4a9 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 26 Aug 2009 14:57:07 +0300 Subject: KVM: Return -ENOTTY on unrecognized ioctls Not the incorrect -EINVAL. 
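As a generic illustration of the convention (not code from this patch): an ioctl handler should fall through to -ENOTTY, "inappropriate ioctl for device", for commands it does not recognize, and keep -EINVAL for recognized commands that were passed bad arguments.

	/* Hypothetical handler, shown only for the error convention. */
	static long example_dev_ioctl(struct file *filp, unsigned int ioctl,
				      unsigned long arg)
	{
		switch (ioctl) {
		case EXAMPLE_GET_STATE:			/* made-up command */
			return example_get_state(arg);	/* made-up helper */
		default:
			return -ENOTTY;			/* unrecognized ioctl */
		}
	}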
Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f534e0f6bb0d..f6471c882667 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -941,7 +941,7 @@ long kvm_arch_vm_ioctl(struct file *filp, { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; - int r = -EINVAL; + int r = -ENOTTY; switch (ioctl) { case KVM_SET_MEMORY_REGION: { -- cgit v1.2.3 From bfd99ff5d483b11c32bca49fbff7a5ac59038b0a Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Wed, 26 Aug 2009 14:57:50 +0300 Subject: KVM: Move assigned device code to own file Signed-off-by: Avi Kivity --- arch/ia64/kvm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 0bb99b732908..1089b3e918ac 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile @@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ - coalesced_mmio.o irq_comm.o) + coalesced_mmio.o irq_comm.o assigned-dev.o) ifeq ($(CONFIG_IOMMU_API),y) common-objs += $(addprefix ../../../virt/kvm/, iommu.o) -- cgit v1.2.3 From 10474ae8945ce08622fd1f3464e55bd817bf2376 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 15 Sep 2009 11:37:46 +0200 Subject: KVM: Activate Virtualization On Demand X86 CPUs need to have some magic happening to enable the virtualization extensions on them. This magic can result in unpleasant results for users, like blocking other VMMs from working (vmx) or using invalid TLB entries (svm). Currently KVM activates virtualization when the respective kernel module is loaded. This blocks us from autoloading KVM modules without breaking other VMMs. To circumvent this problem at least a bit, this patch introduces on demand activation of virtualization. This means, that instead virtualization is enabled on creation of the first virtual machine and disabled on destruction of the last one. So using this, KVM can be easily autoloaded, while keeping other hypervisors usable. Signed-off-by: Alexander Graf Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f6471c882667..5fdeec5fddcf 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler) static DEFINE_SPINLOCK(vp_lock); -void kvm_arch_hardware_enable(void *garbage) +int kvm_arch_hardware_enable(void *garbage) { long status; long tmp_base; @@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage) slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); local_irq_restore(saved_psr); if (slot < 0) - return; + return -EINVAL; spin_lock(&vp_lock); status = ia64_pal_vp_init_env(kvm_vsa_base ? 
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage) __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); if (status != 0) { printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); - return ; + return -EINVAL; } if (!kvm_vsa_base) { @@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage) } spin_unlock(&vp_lock); ia64_ptr_entry(0x3, slot); + + return 0; } void kvm_arch_hardware_disable(void *garbage) -- cgit v1.2.3 From af901ca181d92aac3a7dc265144a9081a86d8f39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Goddard=20Rosa?= Date: Sat, 14 Nov 2009 13:09:05 -0200 Subject: tree-wide: fix assorted typos all over the place MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That is "success", "unknown", "through", "performance", "[re|un]mapping" , "access", "default", "reasonable", "[con]currently", "temperature" , "channel", "[un]used", "application", "example","hierarchy", "therefore" , "[over|under]flow", "contiguous", "threshold", "enough" and others. Signed-off-by: André Goddard Rosa Signed-off-by: Jiri Kosina --- arch/ia64/hp/common/sba_iommu.c | 2 +- arch/ia64/ia32/ia32_entry.S | 2 +- arch/ia64/include/asm/perfmon_default_smpl.h | 2 +- arch/ia64/include/asm/sn/shubio.h | 2 +- arch/ia64/kernel/esi.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 674a8374c6d9..f332e3fe4237 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1381,7 +1381,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, #endif /* - ** Not virtually contigous. + ** Not virtually contiguous. ** Terminate prev chunk. ** Start a new chunk. ** diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index af9405cd70e5..02d1fb732951 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone) (p6) br.cond.spnt .ia32_strace_check_retval ;; // prevent RAW on r8 END(ia32_ret_from_clone) - // fall thrugh + // fall through GLOBAL_ENTRY(ia32_ret_from_syscall) PT_REGS_UNWIND_INFO(0) diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h index 48822c0811d8..74724b24c2b7 100644 --- a/arch/ia64/include/asm/perfmon_default_smpl.h +++ b/arch/ia64/include/asm/perfmon_default_smpl.h @@ -67,7 +67,7 @@ typedef struct { unsigned long ip; /* where did the overflow interrupt happened */ unsigned long tstamp; /* ar.itc when entering perfmon intr. 
handler */ - unsigned short cpu; /* cpu on which the overfow occured */ + unsigned short cpu; /* cpu on which the overflow occured */ unsigned short set; /* event set active when overflow ocurred */ int tgid; /* thread group id (for NPTL, this is getpid()) */ } pfm_default_smpl_entry_t; diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h index 22a6f18a5313..6052422a22b3 100644 --- a/arch/ia64/include/asm/sn/shubio.h +++ b/arch/ia64/include/asm/sn/shubio.h @@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t; #define IIO_IIDSR_LVL_SHIFT 0 #define IIO_IIDSR_LVL_MASK 0x000000ff -/* Xtalk timeout threshhold register (IIO_IXTT) */ +/* Xtalk timeout threshold register (IIO_IXTT) */ #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c index d5764a3d74af..b091111270cb 100644 --- a/arch/ia64/kernel/esi.c +++ b/arch/ia64/kernel/esi.c @@ -84,7 +84,7 @@ static int __init esi_init (void) case ESI_DESC_ENTRY_POINT: break; default: - printk(KERN_WARNING "Unkown table type %d found in " + printk(KERN_WARNING "Unknown table type %d found in " "ESI table, ignoring rest of table\n", *p); return -ENODEV; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1782705b1f7..b3a1cb3e6b25 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -3523,7 +3523,7 @@ pfm_use_debug_registers(struct task_struct *task) * IA64_THREAD_DBG_VALID set. This indicates a task which was * able to use the debug registers for debugging purposes via * ptrace(). Therefore we know it was not using them for - * perfmormance monitoring, so we only decrement the number + * performance monitoring, so we only decrement the number * of "ptraced" debug register users to keep the count up to date */ int -- cgit v1.2.3 From 27a338a69ed9a8a672cd620f5fd7fa450209313c Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 8 Dec 2009 15:29:05 -0800 Subject: [IA64] Fix cut/paste detritus from unistd.h Build warning: :1523:2: warning: #warning syscall recvmmsg not implemented Because when recvmmesg was added, the previous syscall define was cut&pasted, and a spurious "rt_" left in the name of the define. Signed-off-by: Tony Luck --- arch/ia64/include/asm/unistd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 9c72e36c5281..10a8f21ca9e3 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -311,7 +311,7 @@ #define __NR_preadv 1319 #define __NR_pwritev 1320 #define __NR_rt_tgsigqueueinfo 1321 -#define __NR_rt_recvmmsg 1322 +#define __NR_recvmmsg 1322 #ifdef __KERNEL__ -- cgit v1.2.3 From 0067bd8a55862ac9dd212bd1c4f6f5bff1ca1301 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 30 Nov 2009 17:34:06 -0500 Subject: Cut hugetlb case early for 32bit on ia64 It won't work anyway (hugetlb addresses there are way beyond 4Gb) and it's easier to stop it here. 
Signed-off-by: Al Viro --- arch/ia64/ia32/sys_ia32.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 429ec968c9ee..045b746b9808 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot prot = get_prot32(prot); + if (flags & MAP_HUGETLB) + return -ENOMEM; + #if PAGE_SHIFT > IA32_PAGE_SHIFT mutex_lock(&ia32_mmap_mutex); { -- cgit v1.2.3 From f8b7256096a20436f6d0926747e3ac3d64c81d24 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 30 Nov 2009 17:37:04 -0500 Subject: Unify sys_mmap* New helper - sys_mmap_pgoff(); switch syscalls to using it. Acked-by: David S. Miller Signed-off-by: Al Viro --- arch/ia64/kernel/sys_ia64.c | 37 ++----------------------------------- 1 file changed, 2 insertions(+), 35 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 92ed83f34036..ae384a2974c2 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -185,39 +185,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, return 0; } -static inline unsigned long -do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) -{ - struct file *file = NULL; - - flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); - if (!(flags & MAP_ANONYMOUS)) { - file = fget(fd); - if (!file) - return -EBADF; - - if (!file->f_op || !file->f_op->mmap) { - addr = -ENODEV; - goto out; - } - } - - /* Careful about overflows.. */ - len = PAGE_ALIGN(len); - if (!len || len > TASK_SIZE) { - addr = -EINVAL; - goto out; - } - - down_write(¤t->mm->mmap_sem); - addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); - up_write(¤t->mm->mmap_sem); - -out: if (file) - fput(file); - return addr; -} - /* * mmap2() is like mmap() except that the offset is expressed in units * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces @@ -226,7 +193,7 @@ out: if (file) asmlinkage unsigned long sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) { - addr = do_mmap2(addr, len, prot, flags, fd, pgoff); + addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; @@ -238,7 +205,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo if (offset_in_page(off) != 0) return -EINVAL; - addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR((void *) addr)) force_successful_syscall_return(); return addr; -- cgit v1.2.3 From bb52d6694002b9d632bb355f64daa045c6293a4e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 3 Dec 2009 19:59:24 -0500 Subject: Get rid of open-coding in ia64_brk() The comment in there used to be true, but these days do_brk() does the arch-specific check that covers what we open-coded here. So we can use sys_brk() just fine, only need to do force_successful_syscall_return() after it. See commit 3a459756810912d2c2bf188cef566af255936b4d - that's when the checks in do_brk() had been originally added. 
Acked-by: Hugh Dickins Signed-off-by: Al Viro --- arch/ia64/kernel/sys_ia64.c | 46 +-------------------------------------------- 1 file changed, 1 insertion(+), 45 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index ae384a2974c2..609d50056a6c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -100,51 +100,7 @@ sys_getpagesize (void) asmlinkage unsigned long ia64_brk (unsigned long brk) { - unsigned long rlim, retval, newbrk, oldbrk; - struct mm_struct *mm = current->mm; - - /* - * Most of this replicates the code in sys_brk() except for an additional safety - * check and the clearing of r8. However, we can't call sys_brk() because we need - * to acquire the mmap_sem before we can do the test... - */ - down_write(&mm->mmap_sem); - - if (brk < mm->end_code) - goto out; - newbrk = PAGE_ALIGN(brk); - oldbrk = PAGE_ALIGN(mm->brk); - if (oldbrk == newbrk) - goto set_brk; - - /* Always allow shrinking brk. */ - if (brk <= mm->brk) { - if (!do_munmap(mm, newbrk, oldbrk-newbrk)) - goto set_brk; - goto out; - } - - /* Check against unimplemented/unmapped addresses: */ - if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) - goto out; - - /* Check against rlimit.. */ - rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; - if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) - goto out; - - /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) - goto out; - - /* Ok, looks good - let it rip. */ - if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) - goto out; -set_brk: - mm->brk = brk; -out: - retval = mm->brk; - up_write(&mm->mmap_sem); + unsigned long retval = sys_brk(brk); force_successful_syscall_return(); return retval; } -- cgit v1.2.3 From 559df2e0210352f83926d178c40c51142292a18c Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Apr 2009 22:35:10 +0200 Subject: kbuild: move asm-offsets.h to include/generated The simplest method was to add an extra asm-offsets.h file in arch/$ARCH/include/asm that references the generated file. We can now migrate the architectures one-by-one to reference the generated file direct - and when done we can delete the temporary arch/$ARCH/include/asm/asm-offsets.h file. 
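For background, the generated header comes from the usual asm-offsets pattern, which looks roughly like this (a simplified sketch with an illustrative struct, not the actual ia64 asm-offsets.c):

    /* asm-offsets.c: only ever compiled to assembly, never linked */
    #include <linux/kbuild.h>
    #include <linux/stddef.h>

    struct example_regs {
            unsigned long ip;
            unsigned long sp;
    };

    void foo(void)
    {
            /* DEFINE() emits a marker string that kbuild post-processes into
             * "#define EXAMPLE_REGS_IP <offset>" lines in
             * include/generated/asm-offsets.h, so assembly code can rely on
             * the C structure layout. */
            DEFINE(EXAMPLE_REGS_IP, offsetof(struct example_regs, ip));
            DEFINE(EXAMPLE_REGS_SP, offsetof(struct example_regs, sp));
    }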
Signed-off-by: Sam Ravnborg Cc: Al Viro Signed-off-by: Michal Marek --- arch/ia64/include/asm/asm-offsets.h | 1 + 1 file changed, 1 insertion(+) create mode 100644 arch/ia64/include/asm/asm-offsets.h (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/ia64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include -- cgit v1.2.3 From 4929d29c0dffd5fdc2df987254366c2e25c392d4 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Mon, 20 Apr 2009 19:35:54 +0200 Subject: ia64: move nr-irqs.h to include/generated Avoid generating files in the now deprecated asm-ia64 dir Simplified the logic in the Makefile when editing stuff in the area Signed-off-by: Sam Ravnborg Cc: Al Viro Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Michal Marek --- arch/ia64/Makefile | 2 +- arch/ia64/include/asm/irq.h | 2 +- arch/ia64/kernel/Makefile | 7 ++----- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index e7cbaa02cd0b..475e2725fbde 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -103,4 +103,4 @@ archprepare: make_nr_irqs_h FORCE PHONY += make_nr_irqs_h FORCE make_nr_irqs_h: FORCE - $(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h + $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h index 5282546cdf82..91b920fd7d53 100644 --- a/arch/ia64/include/asm/irq.h +++ b/arch/ia64/include/asm/irq.h @@ -13,7 +13,7 @@ #include #include -#include +#include static __inline__ int irq_canonicalize (int irq) diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 6b7edcab0cb5..2a75e937ae8d 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -81,17 +81,14 @@ define cmd_nr_irqs endef # We use internal kbuild rules to avoid the "is up to date" message from make -arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \ - $(wildcard $(srctree)/include/asm-ia64/*/irq.h) +arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c $(Q)mkdir -p $(dir $@) $(call if_changed_dep,cc_s_c) -include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s +include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s $(Q)mkdir -p $(dir $@) $(call cmd,nr_irqs) -clean-files += $(objtree)/include/asm-ia64/nr-irqs.h - # # native ivt.S, entry.S and fsys.S # -- cgit v1.2.3 From 98b8788ae91694499d1995035625bea16a4db0c4 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 18 Oct 2009 00:39:40 +0200 Subject: drop explicit include of autoconf.h kbuild.h forces include of autoconf.h on the commandline using -include - so we do not need to include the file explicit. Signed-off-by: Sam Ravnborg Signed-off-by: Michal Marek --- arch/ia64/kvm/asm-offsets.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c index 0c3564a7a033..9324c875caf5 100644 --- a/arch/ia64/kvm/asm-offsets.c +++ b/arch/ia64/kvm/asm-offsets.c @@ -22,7 +22,6 @@ * */ -#include #include #include -- cgit v1.2.3 From 445c89514be242b1b0080056d50bdc1b72adeb5c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 19:49:50 +0100 Subject: locking: Convert raw_spinlock to arch_spinlock The raw_spin* namespace was taken by lockdep for the architecture specific implementations. 
raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt. Linus suggested to convert the raw_ to arch_ locks and cleanup the name space instead of using an artifical name like core_spin, atomic_spin or whatever No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/ia64/include/asm/spinlock.h | 26 +++++++++++++------------- arch/ia64/include/asm/spinlock_types.h | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 239ecdc9516d..9fbdf7e61087 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -38,7 +38,7 @@ #define TICKET_BITS 15 #define TICKET_MASK ((1 << TICKET_BITS) - 1) -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket, serve; @@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) } } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->lock); @@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return 0; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; @@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } -static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket; @@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) } } -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static 
__always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4a..447ccc6ca7a8 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } -- cgit v1.2.3 From edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 12:38:57 +0100 Subject: locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Further name space cleanup. No functional change Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/ia64/include/asm/spinlock_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 447ccc6ca7a8..6a11b65fa66d 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; -- cgit v1.2.3 From 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 20:01:25 +0100 Subject: locking: Convert __raw_spin* functions to arch_spin* Name space cleanup. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/ia64/include/asm/bitops.h | 2 +- arch/ia64/include/asm/spinlock.h | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 57a2787bc9fb..6ebc229a1c51 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr) * @addr: Address to start counting from * * Similarly to clear_bit_unlock, the implementation uses a store - * with release semantics. See also __raw_spin_unlock(). + * with release semantics. See also arch_spin_unlock(). 
*/ static __inline__ void __clear_bit_unlock(int nr, void *addr) diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 9fbdf7e61087..b06165f6352f 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -17,7 +17,7 @@ #include #include -#define __raw_spin_lock_init(x) ((x)->lock = 0) +#define arch_spin_lock_init(x) ((x)->lock = 0) /* * Ticket locks are conceptually two parts, one indicating the current head of @@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } @@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x) return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ -- cgit v1.2.3 From fb3a6bbc912b12347614e5742c7c61416cdb0ca0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:01:19 +0100 Subject: locking: Convert raw_rwlock to arch_rwlock Not strictly necessary for -rt as -rt does not have non sleeping rwlocks, but it's odd to not have a consistent naming convention. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/ia64/include/asm/spinlock.h | 16 ++++++++-------- arch/ia64/include/asm/spinlock_types.h | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index b06165f6352f..6715b6a8ebc3 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) #define __raw_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -190,14 +190,14 @@ do { \ #define __raw_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int __raw_read_trylock(arch_rwlock_t *x) { union { - raw_rwlock_t lock; + arch_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 6a11b65fa66d..e2b42a52a6d3 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -14,8 +14,8 @@ typedef struct { typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } #endif -- cgit v1.2.3 From e5931943d02bf751b1ec849c0d2ade23d76a8d41 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:08:46 +0100 Subject: locking: Convert raw_rwlock functions to arch_rwlock Name space cleanup for rwlock functions. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/ia64/include/asm/spinlock.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 6715b6a8ebc3..1a91c9121d17 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) __ticket_spin_unlock_wait(lock); } -#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "p6", "p7", "r2", "memory"); } -#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) +#define arch_read_lock(lock) arch_read_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) -#define __raw_read_lock(rw) \ +#define arch_read_lock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ \ @@ -188,7 +188,7 @@ do { \ #endif /* !ASM_SUPPORTED */ -#define __raw_read_unlock(rw) \ +#define arch_read_unlock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -197,7 +197,7 @@ do { \ #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); } -#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) +#define arch_write_lock(rw) arch_write_lock_flags(rw, 0) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ register long result; \ \ @@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) +#define arch_write_lock_flags(l, flags) arch_write_lock(l) -#define __raw_write_lock(l) \ +#define arch_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) } while (ia64_val); \ }) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #endif 
/* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(arch_rwlock_t *x) +static inline int arch_read_trylock(arch_rwlock_t *x) { union { arch_rwlock_t lock; -- cgit v1.2.3 From 239007b8440abff689632f50cdf0f2b9e895b534 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Nov 2009 16:46:45 +0100 Subject: genirq: Convert irq_desc.lock to raw_spinlock Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: Ingo Molnar --- arch/ia64/kernel/iosapic.c | 6 +++--- arch/ia64/kernel/irq.c | 4 ++-- arch/ia64/kernel/irq_ia64.c | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index dab4d393908c..95ac77aeae9b 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi, goto unlock_iosapic_lock; } - spin_lock(&irq_desc[irq].lock); + raw_spin_lock(&irq_desc[irq].lock); dest = get_target_cpu(gsi, irq); dmode = choose_dmode(); err = register_intr(gsi, irq, dmode, polarity, trigger); if (err < 0) { - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); irq = err; goto unlock_iosapic_lock; } @@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi, (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); unlock_iosapic_lock: spin_unlock_irqrestore(&iosapic_lock, flags); return irq; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d8951229e7c..94ee9d067cbd 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a1..70e4bad23432 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) desc = irq_desc + irq; cfg = irq_cfg + irq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } -- cgit v1.2.3 From 09b366b78c9602f53344c269eac608fd6e24d670 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 9 Dec 2009 10:56:41 +0900 Subject: [IA64] preallocate IA64_IRQ_MOVE_VECTOR Previously, we tried to use IA64_DEF_FIRST_DEVICE_VECTOR (0x30) as the IA64_IRQ_MOVE_VECTOR. However, we allocate other IRQs from the device vector range, so there's no guarantee that IA64_DEF_FIRST_DEVICE_VECTOR will still be available when we register IA64_IRQ_MOVE_VECTOR. This patch statically allocates 0x30 for IA64_IRQ_MOVE_VECTOR and removes it from the device vector range. 
Without this patch, we crash on machines like the HP rx3600 that use vector 48 (0x30) as the ACPI SCI interrupt: kernel BUG at arch/ia64/kernel/irq_ia64.c:647! swapper[0]: bugcheck! 0 [1] Modules linked in: Pid: 0, CPU 0, comm: swapper psr : 00001010084a2018 ifs : 800000000000030e ip : [] Not tainted (2.6.32-rc8-00184-gd5d4ec8) ip is at ia64_native_register_percpu_irq+0x110/0x1e0 Signed-off-by: Bjorn Helgaas Signed-off-by: Kenji Kaneshige Tested-by: Kenji Kaneshige Tested-by: Bjorn Helgaas Signed-off-by: Tony Luck --- arch/ia64/include/asm/hw_irq.h | 6 ++++++ arch/ia64/kernel/irq_ia64.c | 6 +----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h index 91619b31dbf5..bf2e37493e04 100644 --- a/arch/ia64/include/asm/hw_irq.h +++ b/arch/ia64/include/asm/hw_irq.h @@ -59,7 +59,13 @@ typedef u16 ia64_vector; extern int ia64_first_device_vector; extern int ia64_last_device_vector; +#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG)) +/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */ +#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */ +#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31 +#else #define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 +#endif #define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 #define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector #define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a1..181a9344e67b 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu) } #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) -#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR static enum vector_domain_type { VECTOR_DOMAIN_NONE, @@ -659,11 +658,8 @@ init_IRQ (void) register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); #ifdef CONFIG_SMP #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) - if (vector_domain_type != VECTOR_DOMAIN_NONE) { - BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); - IA64_FIRST_DEVICE_VECTOR++; + if (vector_domain_type != VECTOR_DOMAIN_NONE) register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); - } #endif #endif #ifdef CONFIG_PERFMON -- cgit v1.2.3 From 9ee27c76393394c7fb1ddeca3f1622d4537185a0 Mon Sep 17 00:00:00 2001 From: Takao Indoh Date: Thu, 19 Nov 2009 16:39:22 -0500 Subject: [IA64] Save I-resources to ia64_sal_os_state This is a patch related to this discussion. http://www.spinics.net/lists/linux-ia64/msg07605.html When INIT is sent, ip/psr/pfs register is stored to the I-resources (iip/ipsr/ifs registers), and they are copied in the min-state save area(pmsa_{iip,ipsr,ifs}). Therefore, in creating pt_regs at ia64_mca_modify_original_stack(), cr_{iip,ipsr,ifs} should be derived from pmsa_{iip,ipsr,ifs}. But current code copies pmsa_{xip,xpsr,xfs} to cr_{iip,ipsr,ifs} when PSR.ic is 0. finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, unsigned long *nat) { (snip) if (ia64_psr(regs)->ic) { regs->cr_iip = ms->pmsa_iip; regs->cr_ipsr = ms->pmsa_ipsr; regs->cr_ifs = ms->pmsa_ifs; } else { regs->cr_iip = ms->pmsa_xip; regs->cr_ipsr = ms->pmsa_xpsr; regs->cr_ifs = ms->pmsa_xfs; } It's ok when PSR.ic is not 0. But when PSR.ic is 0, this could be a problem when we investigate kernel as the value of regs->cr_iip does not point to where INIT really interrupted. 
At first I tried to change finish_pt_regs() so that it uses always pmsa_{iip,ipsr,ifs} for cr_{iip,ipsr,ifs}, but Keith Owens pointed out it could cause another problem if I change it. >The only problem I can think of is an MCA/INIT >arriving while code like SAVE_MIN or SAVE_REST is executing. Back >tracing at that point using pmsa_iip is going to be a problem, you have >no idea what state the registers or stack are in. I confirmed he was right, so I decided to keep it as-is and to save pmsa_{iip,ipsr,ifs} to ia64_sal_os_state for debugging. An attached patch is just adding new members into ia64_sal_os_state to save pmsa_{iip,ipsr,ifs}. Signed-off-by: Takao Indoh Signed-off-by: Tony Luck --- arch/ia64/include/asm/mca.h | 5 +++++ arch/ia64/kernel/mca.c | 11 ++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index c171cdf0a789..43f96ab18fa0 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -106,6 +106,11 @@ struct ia64_sal_os_state { unsigned long os_status; /* OS status to SAL, enum below */ unsigned long context; /* 0 if return to same context 1 if return to new context */ + + /* I-resources */ + unsigned long iip; + unsigned long ipsr; + unsigned long ifs; }; enum { diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 496ac7a99488..32f2639e9b0a 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current) } static void -finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, +finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, unsigned long *nat) { + const pal_min_state_area_t *ms = sos->pal_min_state; const u64 *bank; /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use @@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, regs->cr_iip = ms->pmsa_xip; regs->cr_ipsr = ms->pmsa_xpsr; regs->cr_ifs = ms->pmsa_xfs; + + sos->iip = ms->pmsa_iip; + sos->ipsr = ms->pmsa_ipsr; + sos->ifs = ms->pmsa_ifs; } regs->pr = ms->pmsa_pr; regs->b0 = ms->pmsa_br0; @@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, memcpy(old_regs, regs, sizeof(*regs)); old_regs->loadrs = loadrs; old_unat = old_regs->ar_unat; - finish_pt_regs(old_regs, ms, &old_unat); + finish_pt_regs(old_regs, sos, &old_unat); /* Next stack a struct switch_stack. mca_asm.S built a partial * switch_stack, copy it and fill in the blanks using pt_regs and @@ -1150,7 +1155,7 @@ no_mod: mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", smp_processor_id(), type, msg); old_unat = regs->ar_unat; - finish_pt_regs(regs, ms, &old_unat); + finish_pt_regs(regs, sos, &old_unat); return previous_current; } -- cgit v1.2.3 From e2a465675dc089e9a56ba2fa2a5fbd9bd8844d18 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 17 Nov 2009 14:44:35 -0800 Subject: [IA64] fix SBA IOMMU to handle allocation failure properly It's possible that SBA IOMMU might fail to find I/O space under heavy I/Os. SBA IOMMU panics on allocation failure but it shouldn't; drivers can handle the failure. The majority of other IOMMU drivers don't panic on allocation failure. This patch fixes SBA IOMMU path to handle allocation failure properly. 
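For illustration, the failure handling a driver is expected to do looks roughly like this (a generic sketch, not code from this patch; dev, buf and len stand for whatever the driver is mapping):

    dma_addr_t addr;

    addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr)) {
            /* The IOMMU could not find mapping space: back off and let the
             * caller retry or drop the request instead of panicking. */
            return -ENOMEM;
    }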
Signed-off-by: FUJITA Tomonori Cc: Fenghua Yu Signed-off-by: Andrew Morton Signed-off-by: Tony Luck --- arch/ia64/hp/common/sba_iommu.c | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index f332e3fe4237..e14c492a8a93 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) spin_unlock_irqrestore(&ioc->saved_lock, flags); pide = sba_search_bitmap(ioc, dev, pages_needed, 0); - if (unlikely(pide >= (ioc->res_size << 3))) - panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", - ioc->ioc_hpa); + if (unlikely(pide >= (ioc->res_size << 3))) { + printk(KERN_WARNING "%s: I/O MMU @ %p is" + "out of mapping resources, %u %u %lx\n", + __func__, ioc->ioc_hpa, ioc->res_size, + pages_needed, dma_get_seg_boundary(dev)); + return -1; + } #else - panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", - ioc->ioc_hpa); + printk(KERN_WARNING "%s: I/O MMU @ %p is" + "out of mapping resources, %u %u %lx\n", + __func__, ioc->ioc_hpa, ioc->res_size, + pages_needed, dma_get_seg_boundary(dev)); + return -1; #endif } } @@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, #endif pide = sba_alloc_range(ioc, dev, size); + if (pide < 0) + return 0; iovp = (dma_addr_t) pide << iovp_shift; @@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, unsigned long dma_offset, dma_len; /* start/len of DMA stream */ int n_mappings = 0; unsigned int max_seg_size = dma_get_max_seg_size(dev); + int idx; while (nents > 0) { unsigned long vaddr = (unsigned long) sba_sg_address(startsg); @@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, vcontig_sg->dma_length = vcontig_len; dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; ASSERT(dma_len <= DMA_CHUNK_SIZE); - dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG - | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift) - | dma_offset); + idx = sba_alloc_range(ioc, dev, dma_len); + if (idx < 0) { + dma_sg->dma_length = 0; + return -1; + } + dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift) + | dma_offset); n_mappings++; } return n_mappings; } - +static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); /** * sba_map_sg - map Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. @@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, ** Access to the virtual address is what forces a two pass algorithm. */ coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); + if (coalesced < 0) { + sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs); + return 0; + } /* ** Program the I/O Pdir -- cgit v1.2.3 From 21fc3fded7095f6018e770412b6881ced4c09d6f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 6 Nov 2009 22:42:01 +0000 Subject: [IA64] Replace old style lock initializer SPIN_LOCK_UNLOCKED is deprecated. Use __SPIN_LOCK_UNLOCKED instead. 
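For illustration, the old and new initializer styles look like this (a sketch; my_old_lock, my_lock and my_obj are made-up names):

    /* deprecated: */
    spinlock_t my_old_lock = SPIN_LOCK_UNLOCKED;

    /* preferred for a standalone lock: */
    DEFINE_SPINLOCK(my_lock);

    /* preferred inside a static initializer, which is the case fixed in the
     * rwsem header below: */
    struct my_obj_type {
            spinlock_t lock;
    } my_obj = {
            .lock = __SPIN_LOCK_UNLOCKED(my_obj.lock),
    };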
Signed-off-by: Thomas Gleixner Cc: linux-ia64@vger.kernel.org Signed-off-by: Tony Luck --- arch/ia64/include/asm/rwsem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h index fbee74b15782..e8762688e8e3 100644 --- a/arch/ia64/include/asm/rwsem.h +++ b/arch/ia64/include/asm/rwsem.h @@ -47,7 +47,7 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) #define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ -- cgit v1.2.3 From 430677133fd5a2033222b3b5016bb461fe8fabf2 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Mon, 14 Dec 2009 15:00:37 -0500 Subject: [IA64] implement early_io{re,un}map for ia64 drivers/pci/dmar.c uses these functions, so provide them for ia64 Signed-off-by: Tony Luck --- arch/ia64/include/asm/io.h | 2 ++ arch/ia64/mm/ioremap.c | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 0d9d16e2d949..cc8335eb3110 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr) extern void __iomem * ioremap(unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap (volatile void __iomem *addr); +extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); +extern void early_iounmap (volatile void __iomem *addr, unsigned long size); /* * String version of IO memory access ops: diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 2a140627dfd6..3dccdd8eb275 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -21,6 +21,12 @@ __ioremap (unsigned long phys_addr) return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } +void __iomem * +early_ioremap (unsigned long phys_addr, unsigned long size) +{ + return __ioremap(phys_addr); +} + void __iomem * ioremap (unsigned long phys_addr, unsigned long size) { @@ -101,6 +107,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) } EXPORT_SYMBOL(ioremap_nocache); +void +early_iounmap (volatile void __iomem *addr, unsigned long size) +{ +} + void iounmap (volatile void __iomem *addr) { -- cgit v1.2.3 From 4e25b2576efda24c02e2d6b9bcb5965a3f865f33 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:23 -0800 Subject: hugetlb: add generic definition of NUMA_NO_NODE Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific headers to generic header 'linux/numa.h' for use in generic code. NUMA_NO_NODE replaces bare '-1' where it's used in this series to indicate "no node id specified". Ultimately, it can be used to replace the -1 elsewhere where it is used similarly. 
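As an illustration of the intended use (a sketch; alloc_for_node is a hypothetical helper, not something added by this patch):

    #include <linux/numa.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    static void *alloc_for_node(size_t size, int nid)
    {
            if (nid == NUMA_NO_NODE)        /* caller has no placement preference */
                    nid = numa_node_id();
            return kmalloc_node(size, GFP_KERNEL, nid);
    }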
Signed-off-by: Lee Schermerhorn Acked-by: David Rientjes Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/numa.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 3499ff57bf42..6a8a27cfae3e 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -22,8 +22,6 @@ #include -#define NUMA_NO_NODE -1 - extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; extern pg_data_t *pgdat_list[MAX_NUMNODES]; -- cgit v1.2.3 From cd7bcf32d42b15891620b3f1387a00178b54291a Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Mon, 14 Dec 2009 20:00:36 +0000 Subject: implement early_io{re,un}map for ia64 Needed for commit 2c992208 ("intel-iommu: Detect DMAR in hyperspace at probe time.) to build on IA64. Signed-off-by: Tony Luck Signed-off-by: David Woodhouse --- arch/ia64/include/asm/io.h | 2 ++ arch/ia64/mm/ioremap.c | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 0d9d16e2d949..cc8335eb3110 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr) extern void __iomem * ioremap(unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap (volatile void __iomem *addr); +extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); +extern void early_iounmap (volatile void __iomem *addr, unsigned long size); /* * String version of IO memory access ops: diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 2a140627dfd6..3dccdd8eb275 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -21,6 +21,12 @@ __ioremap (unsigned long phys_addr) return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } +void __iomem * +early_ioremap (unsigned long phys_addr, unsigned long size) +{ + return __ioremap(phys_addr); +} + void __iomem * ioremap (unsigned long phys_addr, unsigned long size) { @@ -101,6 +107,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) } EXPORT_SYMBOL(ioremap_nocache); +void +early_iounmap (volatile void __iomem *addr, unsigned long size) +{ +} + void iounmap (volatile void __iomem *addr) { -- cgit v1.2.3 From 698ba7b5a3a7be772922340fade365c675b8243f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Dec 2009 16:47:37 -0800 Subject: elf: kill USE_ELF_CORE_DUMP Currently all architectures but microblaze unconditionally define USE_ELF_CORE_DUMP. The microblaze omission seems like an error to me, so let's kill this ifdef and make sure we are the same everywhere. 
Signed-off-by: Christoph Hellwig Acked-by: Hugh Dickins Cc: Cc: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/ia32/elfcore32.h | 2 -- arch/ia64/include/asm/elf.h | 1 - 2 files changed, 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h index 9a3abf58cea3..657725742617 100644 --- a/arch/ia64/ia32/elfcore32.h +++ b/arch/ia64/ia32/elfcore32.h @@ -11,8 +11,6 @@ #include #include -#define USE_ELF_CORE_DUMP 1 - /* Override elfcore.h */ #define _LINUX_ELFCORE_H 1 typedef unsigned int elf_greg_t; diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index 86eddee029cb..e14108b19c09 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -25,7 +25,6 @@ #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_IA_64 -#define USE_ELF_CORE_DUMP #define CORE_DUMP_USE_REGSET /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are -- cgit v1.2.3 From ac2b3e67dd59b8c6ef8c199641444c6ea03535a6 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 15 Dec 2009 16:47:43 -0800 Subject: dma-mapping: fix off-by-one error in dma_capable() dma_mask is, when interpreted as address, the last valid byte, and hence comparison msut also be done using the last valid of the buffer in question. Also fix the open-coded instances in lib/swiotlb.c. Signed-off-by: Jan Beulich Cc: FUJITA Tomonori Cc: Becky Bruce Cc: "Luck, Tony" Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/dma-mapping.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 8d3c79cd81e7..7d09a09cdaad 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return 0; - return addr + size <= *dev->dma_mask; + return addr + size - 1 <= *dev->dma_mask; } static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) -- cgit v1.2.3 From 81f6527bd322ef1f3a58c2c438b9ded3bd8f606a Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 15 Dec 2009 16:48:30 -0800 Subject: ia64: use bitmap_find_next_zero_area Signed-off-by: Akinobu Mita Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/sn/pci/tioca_provider.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 35b2a27d2e77..efb454534e52 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) static dma_addr_t tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) { - int i, ps, ps_shift, entry, entries, mapsize, last_entry; + int ps, ps_shift, entry, entries, mapsize; u64 xio_addr, end_xio_addr; struct tioca_common *tioca_common; struct tioca_kernel *tioca_kern; @@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size) map = tioca_kern->ca_pcigart_pagemap; mapsize = tioca_kern->ca_pcigart_entries; - entry = find_first_zero_bit(map, mapsize); - while 
(entry < mapsize) { - last_entry = find_next_bit(map, mapsize, entry); - - if (last_entry - entry >= entries) - break; - - entry = find_next_zero_bit(map, mapsize, last_entry); - } - - if (entry > mapsize) { + entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0); + if (entry >= mapsize) { kfree(ca_dmamap); goto map_return; } - for (i = 0; i < entries; i++) - set_bit(entry + i, map); + bitmap_set(map, entry, entries); bus_addr = tioca_kern->ca_pciap_base + (entry * ps); -- cgit v1.2.3 From 2c48b9c45579a9b5e3e74694eebf3d2451f3dbd3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 9 Aug 2009 00:52:35 +0400 Subject: switch alloc_file() to passing struct path ... and have the caller grab both mnt and dentry; kill leak in infiniband, while we are at it. Signed-off-by: Al Viro --- arch/ia64/kernel/perfmon.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 599b233bef75..5246285a95fb 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2200,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx) { struct file *file; struct inode *inode; - struct dentry *dentry; + struct path path; char name[32]; struct qstr this; @@ -2225,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx) /* * allocate a new dcache entry */ - dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); - if (!dentry) { + path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); + if (!path.dentry) { iput(inode); return ERR_PTR(-ENOMEM); } + path.mnt = mntget(pfmfs_mnt); - dentry->d_op = &pfmfs_dentry_operations; - d_add(dentry, inode); + path.dentry->d_op = &pfmfs_dentry_operations; + d_add(path.dentry, inode); - file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops); + file = alloc_file(&path, FMODE_READ, &pfm_file_ops); if (!file) { - dput(dentry); + path_put(&path); return ERR_PTR(-ENFILE); } -- cgit v1.2.3 From 1d9cb470a755409ce97c3376174b1e234bd20371 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:14 -0700 Subject: ACPI: processor: introduce arch_has_acpi_pdc arch dependent helper function that tells us if we should attempt to evaluate _PDC on this machine or not. The x86 implementation assumes that the CPUs in the machine must be homogeneous, and that you cannot mix CPUs of different vendors. Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 91df9686a0da..3b7888294648 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -132,6 +132,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif +static inline bool arch_has_acpi_pdc(void) { return true; } + #define acpi_unlazy_tlb(x) #ifdef CONFIG_ACPI_NUMA -- cgit v1.2.3 From 407cd87c54e76c266245e8faef8dd4a84b7254fe Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:19 -0700 Subject: ACPI: processor: unify arch_acpi_processor_init_pdc The x86 and ia64 implementations of arch_acpi_processor_init_pdc() are almost exactly the same. The only difference is in what bits they set in obj_list buffer. Combine the boilerplate memory management code, and leave the arch-specific bit twiddling in separate implementations. 
Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/kernel/acpi-processor.c | 34 +--------------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c index dbda7bde6112..ab72d46f52c6 100644 --- a/arch/ia64/kernel/acpi-processor.c +++ b/arch/ia64/kernel/acpi-processor.c @@ -16,31 +16,7 @@ static void init_intel_pdc(struct acpi_processor *pr) { - struct acpi_object_list *obj_list; - union acpi_object *obj; - u32 *buf; - - /* allocate and initialize pdc. It will be used later. */ - obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); - if (!obj_list) { - printk(KERN_ERR "Memory allocation error\n"); - return; - } - - obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); - if (!obj) { - printk(KERN_ERR "Memory allocation error\n"); - kfree(obj_list); - return; - } - - buf = kmalloc(12, GFP_KERNEL); - if (!buf) { - printk(KERN_ERR "Memory allocation error\n"); - kfree(obj); - kfree(obj_list); - return; - } + u32 *buf = (u32 *)pr->pdc->pointer->buffer.pointer; buf[0] = ACPI_PDC_REVISION_ID; buf[1] = 1; @@ -52,20 +28,12 @@ static void init_intel_pdc(struct acpi_processor *pr) */ buf[2] |= ACPI_PDC_SMP_T_SWCOORD; - obj->type = ACPI_TYPE_BUFFER; - obj->buffer.length = 12; - obj->buffer.pointer = (u8 *) buf; - obj_list->count = 1; - obj_list->pointer = obj; - pr->pdc = obj_list; - return; } /* Initialize _PDC data based on the CPU vendor */ void arch_acpi_processor_init_pdc(struct acpi_processor *pr) { - pr->pdc = NULL; init_intel_pdc(pr); return; } -- cgit v1.2.3 From 08ea48a326d8030ef5b7fb02292faf5a53c95e0a Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:24 -0700 Subject: ACPI: processor: factor out common _PDC settings Both x86 and ia64 initialize _PDC with mostly common bit settings. Factor out the common settings and leave the arch-specific ones alone. Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/kernel/acpi-processor.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c index ab72d46f52c6..ebe23f58bd6e 100644 --- a/arch/ia64/kernel/acpi-processor.c +++ b/arch/ia64/kernel/acpi-processor.c @@ -18,15 +18,7 @@ static void init_intel_pdc(struct acpi_processor *pr) { u32 *buf = (u32 *)pr->pdc->pointer->buffer.pointer; - buf[0] = ACPI_PDC_REVISION_ID; - buf[1] = 1; - buf[2] = ACPI_PDC_EST_CAPABILITY_SMP; - /* - * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so - * that OSPM is capable of native ACPI throttling software - * coordination using BIOS supplied _TSD info. - */ - buf[2] |= ACPI_PDC_SMP_T_SWCOORD; + buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; return; } -- cgit v1.2.3 From 6c5807d7bc7d051fce00863ffb98d36325501eb2 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:29 -0700 Subject: ACPI: processor: finish unifying arch_acpi_processor_init_pdc() The only thing arch-specific about calling _PDC is what bits get set in the input obj_list buffer. There's no need for several levels of indirection to twiddle those bits. Additionally, since we're just messing around with a buffer, we can simplify the interface; no need to pass around the entire struct acpi_processor * just to get at the buffer. 
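With the arch hook reduced to bit twiddling, the shared allocation logic moves out of arch/ia64 and so does not appear in this (arch/ia64-limited) log. Purely as an illustration of the end state, the generic-side helper could look roughly like the sketch below; the name acpi_processor_alloc_pdc() and the error-handling shape are assumptions, while arch_acpi_set_pdc_bits() is the hook introduced in the following patch.

#include <linux/acpi.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only; not part of the patches in this log.
 * acpi_processor_alloc_pdc() is an assumed name for the shared helper,
 * arch_acpi_set_pdc_bits() is the per-arch hook added below.
 */
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
	struct acpi_object_list *obj_list;
	union acpi_object *obj;
	u32 *buf;

	/* One copy of the allocation boilerplate, shared by every arch. */
	obj_list = kmalloc(sizeof(*obj_list), GFP_KERNEL);
	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	buf = kmalloc(12, GFP_KERNEL);		/* three u32 capability words */
	if (!obj_list || !obj || !buf) {
		kfree(buf);
		kfree(obj);
		kfree(obj_list);
		return NULL;
	}

	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = 0;
	arch_acpi_set_pdc_bits(buf);		/* arch code only ORs bits into buf[2] */

	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = 12;
	obj->buffer.pointer = (u8 *)buf;
	obj_list->count = 1;
	obj_list->pointer = obj;
	return obj_list;
}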
Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 4 ++++ arch/ia64/kernel/acpi-processor.c | 18 ------------------ 2 files changed, 4 insertions(+), 18 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 3b7888294648..7ae58892ba8d 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -133,6 +133,10 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif static inline bool arch_has_acpi_pdc(void) { return true; } +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; +} #define acpi_unlazy_tlb(x) diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c index ebe23f58bd6e..7ba5accebf66 100644 --- a/arch/ia64/kernel/acpi-processor.c +++ b/arch/ia64/kernel/acpi-processor.c @@ -14,24 +14,6 @@ #include #include -static void init_intel_pdc(struct acpi_processor *pr) -{ - u32 *buf = (u32 *)pr->pdc->pointer->buffer.pointer; - - buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; - - return; -} - -/* Initialize _PDC data based on the CPU vendor */ -void arch_acpi_processor_init_pdc(struct acpi_processor *pr) -{ - init_intel_pdc(pr); - return; -} - -EXPORT_SYMBOL(arch_acpi_processor_init_pdc); - void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr) { if (pr->pdc) { -- cgit v1.2.3 From 47817254b8637b56730aec26eed2c337d3938bb5 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:34 -0700 Subject: ACPI: processor: unify arch_acpi_processor_cleanup_pdc The x86 and ia64 implementations of the function in $subject are exactly the same. Also, since the arch-specific implementations of setting _PDC have been completely hollowed out, remove the empty shells. Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/kernel/Makefile | 4 ---- arch/ia64/kernel/acpi-processor.c | 27 --------------------------- 2 files changed, 31 deletions(-) delete mode 100644 arch/ia64/kernel/acpi-processor.c (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 2a75e937ae8d..e1236349c99f 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -18,10 +18,6 @@ obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o -ifneq ($(CONFIG_ACPI_PROCESSOR),) -obj-y += acpi-processor.o -endif - obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IOSAPIC) += iosapic.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c deleted file mode 100644 index 7ba5accebf66..000000000000 --- a/arch/ia64/kernel/acpi-processor.c +++ /dev/null @@ -1,27 +0,0 @@ -/* - * arch/ia64/kernel/acpi-processor.c - * - * Copyright (C) 2005 Intel Corporation - * Venkatesh Pallipadi - * - Added _PDC for platforms with Intel CPUs - */ - -#include -#include -#include -#include - -#include -#include - -void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr) -{ - if (pr->pdc) { - kfree(pr->pdc->pointer->buffer.pointer); - kfree(pr->pdc->pointer); - kfree(pr->pdc); - pr->pdc = NULL; - } -} - -EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc); -- cgit v1.2.3 From a662b8135a1f9fee7d3f9129498cb03f3d6ce772 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Thu, 17 Dec 2009 17:05:03 -0800 Subject: KVM: ia64: fix build breakage due to host spinlock change Len Brown pointed out that allmodconfig is broken for ia64 because of: arch/ia64/kvm/vmm.c: In function 'vmm_spin_unlock': arch/ia64/kvm/vmm.c:70: error: 'spinlock_t' has no member named 'raw_lock' KVM has it's own spinlock routines. It should not depend on the base kernel spinlock_t type (which changed when ia64 switched to ticket locks). Define its own vmm_spinlock_t type. 
Signed-off-by: Tony Luck Signed-off-by: Avi Kivity --- arch/ia64/kvm/vcpu.h | 9 ++++++--- arch/ia64/kvm/vmm.c | 4 ++-- arch/ia64/kvm/vtlb.c | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index 360724d3ae69..988911b4cc7a 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h @@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn) #define _vmm_raw_spin_lock(x) do {}while(0) #define _vmm_raw_spin_unlock(x) do {}while(0) #else +typedef struct { + volatile unsigned int lock; +} vmm_spinlock_t; #define _vmm_raw_spin_lock(x) \ do { \ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ @@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn) #define _vmm_raw_spin_unlock(x) \ do { barrier(); \ - ((spinlock_t *)x)->raw_lock.lock = 0; } \ + ((vmm_spinlock_t *)x)->lock = 0; } \ while (0) #endif -void vmm_spin_lock(spinlock_t *lock); -void vmm_spin_unlock(spinlock_t *lock); +void vmm_spin_lock(vmm_spinlock_t *lock); +void vmm_spin_unlock(vmm_spinlock_t *lock); enum { I_TLB = 1, D_TLB = 2 diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c index f4b4c899bb6c..7a62f75778c5 100644 --- a/arch/ia64/kvm/vmm.c +++ b/arch/ia64/kvm/vmm.c @@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void) return ; } -void vmm_spin_lock(spinlock_t *lock) +void vmm_spin_lock(vmm_spinlock_t *lock) { _vmm_raw_spin_lock(lock); } -void vmm_spin_unlock(spinlock_t *lock) +void vmm_spin_unlock(vmm_spinlock_t *lock) { _vmm_raw_spin_unlock(lock); } diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 20b3852f7a6e..4332f7ee5203 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) { u64 i, dirty_pages = 1; u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; - spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); + vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; -- cgit v1.2.3
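Closing note on the dma_capable() off-by-one fix from Jan Beulich's patch earlier in this section: a tiny stand-alone model (user-space code, illustrative addresses) of the boundary case the change addresses. A buffer that ends exactly at the top of a 32-bit DMA mask was rejected by the old comparison and is accepted once the last valid byte is compared instead.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative user-space model of the check, not the kernel code itself. */
static bool old_check(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size <= mask;		/* off by one at the top of the mask */
}

static bool new_check(uint64_t addr, uint64_t size, uint64_t mask)
{
	return addr + size - 1 <= mask;		/* compare the last valid byte */
}

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* 32-bit DMA mask */
	uint64_t addr = 0xfffff000ULL;		/* buffer ends at 0xffffffff, */
	uint64_t size = 0x1000ULL;		/* i.e. exactly at the mask limit */

	/* prints "old: 0, new: 1": the old test wrongly rejects this buffer */
	printf("old: %d, new: %d\n", old_check(addr, size, mask),
	       new_check(addr, size, mask));
	return 0;
}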