Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig             10
-rw-r--r--  drivers/char/Makefile             1
-rw-r--r--  drivers/char/hpet.c               4
-rw-r--r--  drivers/char/mem.c               39
-rw-r--r--  drivers/char/mspec.c            421
-rw-r--r--  drivers/char/rtc.c                5
-rw-r--r--  drivers/char/watchdog/Kconfig     8
-rw-r--r--  drivers/char/watchdog/shwdt.c   110
8 files changed, 558 insertions(+), 40 deletions(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 52ea94b891f5..1b21c3a911d9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -439,6 +439,14 @@ config SGI_MBCS
If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N.
+config MSPEC
+ tristate "Memory special operations driver"
+ depends on IA64
+ help
+ If you have an ia64 and you want to enable memory special
+ operations support (formerly known as fetchop), say Y here,
+ otherwise say N.
+
source "drivers/serial/Kconfig"
config UNIX98_PTYS
@@ -739,7 +747,7 @@ config NVRAM
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM
+ depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 8c6dfc621520..b583d0cd9fbe 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
+obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 8afba339f05a..58b0eb581114 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -868,8 +868,8 @@ int hpet_alloc(struct hpet_data *hdp)
do_div(temp, period);
hpetp->hp_tick_freq = temp; /* ticks per second */
- printk(KERN_INFO "hpet%d: at MMIO 0x%lx (virtual 0x%p), IRQ%s",
- hpetp->hp_which, hdp->hd_phys_address, hdp->hd_address,
+ printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
+ hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 917b20402664..4ac70ec697f0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -238,6 +238,32 @@ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
#endif
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_mem(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ if (!valid_mmap_phys_addr_range(pgoff, len))
+ return (unsigned long) -EINVAL;
+ return pgoff;
+}
+
+/* can't do an in-place private mapping if there's no MMU */
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_MAYSHARE;
+}
+#else
+#define get_unmapped_area_mem NULL
+
+static inline int private_mapping_ok(struct vm_area_struct *vma)
+{
+ return 1;
+}
+#endif
+
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
size_t size = vma->vm_end - vma->vm_start;
@@ -245,6 +271,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
+ if (!private_mapping_ok(vma))
+ return -ENOSYS;
+
vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
size,
vma->vm_page_prot);
@@ -782,6 +811,7 @@ static const struct file_operations mem_fops = {
.write = write_mem,
.mmap = mmap_mem,
.open = open_mem,
+ .get_unmapped_area = get_unmapped_area_mem,
};
static const struct file_operations kmem_fops = {
@@ -790,6 +820,7 @@ static const struct file_operations kmem_fops = {
.write = write_kmem,
.mmap = mmap_kmem,
.open = open_kmem,
+ .get_unmapped_area = get_unmapped_area_mem,
};
static const struct file_operations null_fops = {
@@ -815,6 +846,10 @@ static const struct file_operations zero_fops = {
.mmap = mmap_zero,
};
+/*
+ * capabilities for /dev/zero
+ * - permits private mappings, "copies" are taken of the source of zeros
+ */
static struct backing_dev_info zero_bdi = {
.capabilities = BDI_CAP_MAP_COPY,
};
@@ -862,9 +897,13 @@ static int memory_open(struct inode * inode, struct file * filp)
switch (iminor(inode)) {
case 1:
filp->f_op = &mem_fops;
+ filp->f_mapping->backing_dev_info =
+ &directly_mappable_cdev_bdi;
break;
case 2:
filp->f_op = &kmem_fops;
+ filp->f_mapping->backing_dev_info =
+ &directly_mappable_cdev_bdi;
break;
case 3:
filp->f_op = &null_fops;
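
A note on the no-MMU path added to mem.c above: without an MMU there is no way to fake a private (copy-on-write) mapping of /dev/mem, so private_mapping_ok() now rejects MAP_PRIVATE with -ENOSYS, and get_unmapped_area_mem() hands the requested physical page offset straight back to the caller. A minimal user-space sketch of the shared-mapping case follows; the base address and length are purely illustrative, not taken from the patch.

	/* Sketch: mapping a physical window through /dev/mem on a no-MMU
	 * target.  The address and length below are illustrative only. */
	#include <sys/types.h>
	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const off_t phys = 0x10000000;	/* hypothetical device window */
		const size_t len = 4096;
		int fd = open("/dev/mem", O_RDWR | O_SYNC);

		if (fd < 0)
			return 1;

		/* MAP_SHARED is required: private_mapping_ok() rejects
		 * MAP_PRIVATE with -ENOSYS when there is no MMU to do
		 * copy-on-write. */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, phys);
		if (p == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return 1;
		}

		/* ... access the device registers through p ... */
		munmap(p, len);
		close(fd);
		return 0;
	}
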
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
new file mode 100644
index 000000000000..5426b1e5595f
--- /dev/null
+++ b/drivers/char/mspec.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/*
+ * SN Platform Special Memory (mspec) Support
+ *
+ * This driver exports the SN special memory (mspec) facility to user
+ * processes.
+ * There are three types of memory made available through this driver:
+ * fetchops, uncached and cached.
+ *
+ * Fetchops are atomic memory operations that are implemented in the
+ * memory controller on SGI SN hardware.
+ *
+ * Uncached regions are used for the memory write-combining feature of
+ * the ia64 cpu.
+ *
+ * Cached are used for areas of memory that are used as cached addresses
+ * on our partition and used as uncached addresses from other partitions.
+ * Due to a design constraint of the SN2 Shub, you can not have processors
+ * on the same FSB perform both a cached and uncached reference to the
+ * same cache line. These special memory cached regions prevent the
+ * kernel from ever dropping in a TLB entry and therefore prevent the
+ * processor from ever speculating a cache line from this page.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/numa.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/uncached.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/io.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/shubio.h>
+
+
+#define FETCHOP_ID "SGI Fetchop,"
+#define CACHED_ID "Cached,"
+#define UNCACHED_ID "Uncached"
+#define REVISION "4.0"
+#define MSPEC_BASENAME "mspec"
+
+/*
+ * Page types allocated by the device.
+ */
+enum {
+ MSPEC_FETCHOP = 1,
+ MSPEC_CACHED,
+ MSPEC_UNCACHED
+};
+
+static int is_sn2;
+
+/*
+ * One of these structures is allocated when an mspec region is mmaped. The
+ * structure is pointed to by the vma->vm_private_data field in the vma struct.
+ * This structure is used to record the addresses of the mspec pages.
+ */
+struct vma_data {
+ atomic_t refcnt; /* Number of vmas sharing the data. */
+ spinlock_t lock; /* Serialize access to the vma. */
+ int count; /* Number of pages allocated. */
+ int type; /* Type of pages allocated. */
+ unsigned long maddr[0]; /* Array of MSPEC addresses. */
+};
+
+/* used on shub2 to clear FOP cache in the HUB */
+static unsigned long scratch_page[MAX_NUMNODES];
+#define SH2_AMO_CACHE_ENTRIES 4
+
+static inline int
+mspec_zero_block(unsigned long addr, int len)
+{
+ int status;
+
+ if (is_sn2) {
+ if (is_shub2()) {
+ int nid;
+ void *p;
+ int i;
+
+ nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
+ p = (void *)TO_AMO(scratch_page[nid]);
+
+ for (i=0; i < SH2_AMO_CACHE_ENTRIES; i++) {
+ FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
+ p += FETCHOP_VAR_SIZE;
+ }
+ }
+
+ status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
+ BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
+ } else {
+ memset((char *) addr, 0, len);
+ status = 0;
+ }
+ return status;
+}
+
+/*
+ * mspec_open
+ *
+ * Called when a device mapping is created by a means other than mmap
+ * (via fork, etc.). Increments the reference count on the underlying
+ * mspec data so it is not freed prematurely.
+ */
+static void
+mspec_open(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+
+ vdata = vma->vm_private_data;
+ atomic_inc(&vdata->refcnt);
+}
+
+/*
+ * mspec_close
+ *
+ * Called when unmapping a device mapping. Frees all mspec pages
+ * belonging to the vma.
+ */
+static void
+mspec_close(struct vm_area_struct *vma)
+{
+ struct vma_data *vdata;
+ int i, pages, result, vdata_size;
+
+ vdata = vma->vm_private_data;
+ if (!atomic_dec_and_test(&vdata->refcnt))
+ return;
+
+ pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
+ for (i = 0; i < pages; i++) {
+ if (vdata->maddr[i] == 0)
+ continue;
+ /*
+ * Clear the page before sticking it back
+ * into the pool.
+ */
+ result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
+ if (!result)
+ uncached_free_page(vdata->maddr[i]);
+ else
+ printk(KERN_WARNING "mspec_close(): "
+ "failed to zero page %i\n",
+ result);
+ }
+
+ if (vdata_size <= PAGE_SIZE)
+ kfree(vdata);
+ else
+ vfree(vdata);
+}
+
+
+/*
+ * mspec_nopfn
+ *
+ * Creates a mspec page and maps it to user space.
+ */
+static unsigned long
+mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
+{
+ unsigned long paddr, maddr;
+ unsigned long pfn;
+ int index;
+ struct vma_data *vdata = vma->vm_private_data;
+
+ index = (address - vma->vm_start) >> PAGE_SHIFT;
+ maddr = (volatile unsigned long) vdata->maddr[index];
+ if (maddr == 0) {
+ maddr = uncached_alloc_page(numa_node_id());
+ if (maddr == 0)
+ return NOPFN_OOM;
+
+ spin_lock(&vdata->lock);
+ if (vdata->maddr[index] == 0) {
+ vdata->count++;
+ vdata->maddr[index] = maddr;
+ } else {
+ uncached_free_page(maddr);
+ maddr = vdata->maddr[index];
+ }
+ spin_unlock(&vdata->lock);
+ }
+
+ if (vdata->type == MSPEC_FETCHOP)
+ paddr = TO_AMO(maddr);
+ else
+ paddr = __pa(TO_CAC(maddr));
+
+ pfn = paddr >> PAGE_SHIFT;
+
+ return pfn;
+}
+
+static struct vm_operations_struct mspec_vm_ops = {
+ .open = mspec_open,
+ .close = mspec_close,
+ .nopfn = mspec_nopfn
+};
+
+/*
+ * mspec_mmap
+ *
+ * Called when mmaping the device. Initializes the vma with a fault handler
+ * and private data structure necessary to allocate, track, and free the
+ * underlying pages.
+ */
+static int
+mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+{
+ struct vma_data *vdata;
+ int pages, vdata_size;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_WRITE) == 0)
+ return -EPERM;
+
+ pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
+ if (vdata_size <= PAGE_SIZE)
+ vdata = kmalloc(vdata_size, GFP_KERNEL);
+ else
+ vdata = vmalloc(vdata_size);
+ if (!vdata)
+ return -ENOMEM;
+ memset(vdata, 0, vdata_size);
+
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+ vdata->refcnt = ATOMIC_INIT(1);
+ vma->vm_private_data = vdata;
+
+ vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
+ if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_ops = &mspec_vm_ops;
+
+ return 0;
+}
+
+static int
+fetchop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_FETCHOP);
+}
+
+static int
+cached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_CACHED);
+}
+
+static int
+uncached_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return mspec_mmap(file, vma, MSPEC_UNCACHED);
+}
+
+static struct file_operations fetchop_fops = {
+ .owner = THIS_MODULE,
+ .mmap = fetchop_mmap
+};
+
+static struct miscdevice fetchop_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgi_fetchop",
+ .fops = &fetchop_fops
+};
+
+static struct file_operations cached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = cached_mmap
+};
+
+static struct miscdevice cached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_cached",
+ .fops = &cached_fops
+};
+
+static struct file_operations uncached_fops = {
+ .owner = THIS_MODULE,
+ .mmap = uncached_mmap
+};
+
+static struct miscdevice uncached_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mspec_uncached",
+ .fops = &uncached_fops
+};
+
+/*
+ * mspec_init
+ *
+ * Called at boot time to initialize the mspec facility.
+ */
+static int __init
+mspec_init(void)
+{
+ int ret;
+ int nid;
+
+ /*
+ * The fetchop device only works on SN2 hardware; the uncached and cached
+ * memory drivers should both be valid on all ia64 hardware.
+ */
+ if (ia64_platform_is("sn2")) {
+ is_sn2 = 1;
+ if (is_shub2()) {
+ ret = -ENOMEM;
+ for_each_online_node(nid) {
+ int actual_nid;
+ int nasid;
+ unsigned long phys;
+
+ scratch_page[nid] = uncached_alloc_page(nid);
+ if (scratch_page[nid] == 0)
+ goto free_scratch_pages;
+ phys = __pa(scratch_page[nid]);
+ nasid = get_node_number(phys);
+ actual_nid = nasid_to_cnodeid(nasid);
+ if (actual_nid != nid)
+ goto free_scratch_pages;
+ }
+ }
+
+ ret = misc_register(&fetchop_miscdev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: failed to register device %i\n",
+ FETCHOP_ID, ret);
+ goto free_scratch_pages;
+ }
+ }
+ ret = misc_register(&cached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ CACHED_ID, ret);
+ if (is_sn2)
+ misc_deregister(&fetchop_miscdev);
+ goto free_scratch_pages;
+ }
+ ret = misc_register(&uncached_miscdev);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to register device %i\n",
+ UNCACHED_ID, ret);
+ misc_deregister(&cached_miscdev);
+ if (is_sn2)
+ misc_deregister(&fetchop_miscdev);
+ goto free_scratch_pages;
+ }
+
+ printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
+ MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
+ CACHED_ID, UNCACHED_ID);
+
+ return 0;
+
+ free_scratch_pages:
+ for_each_node(nid) {
+ if (scratch_page[nid] != 0)
+ uncached_free_page(scratch_page[nid]);
+ }
+ return ret;
+}
+
+static void __exit
+mspec_exit(void)
+{
+ int nid;
+
+ misc_deregister(&uncached_miscdev);
+ misc_deregister(&cached_miscdev);
+ if (is_sn2) {
+ misc_deregister(&fetchop_miscdev);
+
+ for_each_node(nid) {
+ if (scratch_page[nid] != 0)
+ uncached_free_page(scratch_page[nid]);
+ }
+ }
+}
+
+module_init(mspec_init);
+module_exit(mspec_exit);
+
+MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
+MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 6e6a7c7a7eff..ab6429b4a84e 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -209,11 +209,12 @@ static const unsigned char days_in_mo[] =
*/
static inline unsigned char rtc_is_updating(void)
{
+ unsigned long flags;
unsigned char uip;
- spin_lock_irq(&rtc_lock);
+ spin_lock_irqsave(&rtc_lock, flags);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
- spin_unlock_irq(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return uip;
}
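
The rtc.c change above matters for callers that reach rtc_is_updating() with interrupts already disabled: the old spin_unlock_irq() would unconditionally re-enable them on the way out, while the irqsave/irqrestore pair restores whatever state the caller had. A sketch of the kind of caller that motivates it; rtc_wait_not_updating() is hypothetical and not part of the patch.

	/* Hypothetical caller inside rtc.c that must keep interrupts masked
	 * across its whole critical section.  With the old spin_unlock_irq()
	 * in rtc_is_updating(), interrupts would come back on behind the
	 * caller's back after the first poll; with irqsave/irqrestore they
	 * stay exactly as the caller left them. */
	static void rtc_wait_not_updating(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		while (rtc_is_updating())
			cpu_relax();
		/* ... sample the clock registers while still masked ... */
		local_irq_restore(flags);
	}
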
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index fff89c2d88fd..f114d7b5bb2a 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -510,6 +510,14 @@ config SH_WDT
To compile this driver as a module, choose M here: the
module will be called shwdt.
+config SH_WDT_MMAP
+ bool "Allow mmap of SH WDT"
+ default n
+ depends on SH_WDT
+ help
+ If you say Y here, user applications will be able to mmap the
+ WDT/CPG registers.
+#
# SPARC64 Architecture
config WATCHDOG_CP1XXX
diff --git a/drivers/char/watchdog/shwdt.c b/drivers/char/watchdog/shwdt.c
index 1355038f1044..e5b8c64f1d65 100644
--- a/drivers/char/watchdog/shwdt.c
+++ b/drivers/char/watchdog/shwdt.c
@@ -27,7 +27,7 @@
#include <linux/notifier.h>
#include <linux/ioport.h>
#include <linux/fs.h>
-
+#include <linux/mm.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/watchdog.h>
@@ -125,7 +125,6 @@ static void sh_wdt_start(void)
/**
* sh_wdt_stop - Stop the Watchdog
- *
* Stops the watchdog.
*/
static void sh_wdt_stop(void)
@@ -141,22 +140,20 @@ static void sh_wdt_stop(void)
/**
* sh_wdt_keepalive - Keep the Userspace Watchdog Alive
- *
* The Userspace watchdog got a KeepAlive: schedule the next heartbeat.
*/
-static void sh_wdt_keepalive(void)
+static inline void sh_wdt_keepalive(void)
{
next_heartbeat = jiffies + (heartbeat * HZ);
}
/**
* sh_wdt_set_heartbeat - Set the Userspace Watchdog heartbeat
- *
* Set the Userspace Watchdog heartbeat
*/
static int sh_wdt_set_heartbeat(int t)
{
- if ((t < 1) || (t > 3600)) /* arbitrary upper limit */
+ if (unlikely((t < 1) || (t > 3600))) /* arbitrary upper limit */
return -EINVAL;
heartbeat = t;
@@ -165,7 +162,6 @@ static int sh_wdt_set_heartbeat(int t)
/**
* sh_wdt_ping - Ping the Watchdog
- *
* @data: Unused
*
* Clears overflow bit, resets timer counter.
@@ -182,14 +178,13 @@ static void sh_wdt_ping(unsigned long data)
sh_wdt_write_cnt(0);
mod_timer(&timer, next_ping_period(clock_division_ratio));
- } else {
- printk(KERN_WARNING PFX "Heartbeat lost! Will not ping the watchdog\n");
- }
+ } else
+ printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
+ "the watchdog\n");
}
/**
* sh_wdt_open - Open the Device
- *
* @inode: inode of device
* @file: file handle of device
*
@@ -209,7 +204,6 @@ static int sh_wdt_open(struct inode *inode, struct file *file)
/**
* sh_wdt_close - Close the Device
- *
* @inode: inode of device
* @file: file handle of device
*
@@ -220,7 +214,8 @@ static int sh_wdt_close(struct inode *inode, struct file *file)
if (shwdt_expect_close == 42) {
sh_wdt_stop();
} else {
- printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
+ printk(KERN_CRIT PFX "Unexpected close, not "
+ "stopping watchdog!\n");
sh_wdt_keepalive();
}
@@ -232,7 +227,6 @@ static int sh_wdt_close(struct inode *inode, struct file *file)
/**
* sh_wdt_write - Write to Device
- *
* @file: file handle of device
* @buf: buffer to write
* @count: length of buffer
@@ -264,8 +258,56 @@ static ssize_t sh_wdt_write(struct file *file, const char *buf,
}
/**
- * sh_wdt_ioctl - Query Device
+ * sh_wdt_mmap - map WDT/CPG registers into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ *
+ * A simple mmap() implementation for the corner cases where the counter
+ * needs to be mapped in userspace directly. Due to the relatively small
+ * size of the area, neighbouring registers not necessarily tied to the
+ * CPG will also be accessible through the register page, so this remains
+ * configurable for users that really know what they're doing.
*
+ * Additionally, the register page maps in the CPG register base relative
+ * to the nearest page-aligned boundary, which requires that userspace do
+ * the appropriate CPU subtype math for calculating the page offset for
+ * the counter value.
+ */
+static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret = -ENOSYS;
+
+#ifdef CONFIG_SH_WDT_MMAP
+ unsigned long addr;
+
+ /* Only support the simple cases where we map in a register page. */
+ if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+ return -EINVAL;
+
+ /*
+ * Pick WTCNT as the start, it's usually the first register after the
+ * FRQCR, and neither one are generally page-aligned out of the box.
+ */
+ addr = WTCNT & ~(PAGE_SIZE - 1);
+
+ vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n",
+ __FUNCTION__);
+ return -EAGAIN;
+ }
+
+ ret = 0;
+#endif
+
+ return ret;
+}
+
+/**
+ * sh_wdt_ioctl - Query Device
* @inode: inode of device
* @file: file handle of device
* @cmd: watchdog command
@@ -326,7 +368,6 @@ static int sh_wdt_ioctl(struct inode *inode, struct file *file,
/**
* sh_wdt_notify_sys - Notifier Handler
- *
* @this: notifier block
* @code: notifier event
* @unused: unused
@@ -337,9 +378,8 @@ static int sh_wdt_ioctl(struct inode *inode, struct file *file,
static int sh_wdt_notify_sys(struct notifier_block *this,
unsigned long code, void *unused)
{
- if (code == SYS_DOWN || code == SYS_HALT) {
+ if (code == SYS_DOWN || code == SYS_HALT)
sh_wdt_stop();
- }
return NOTIFY_DONE;
}
@@ -351,10 +391,12 @@ static const struct file_operations sh_wdt_fops = {
.ioctl = sh_wdt_ioctl,
.open = sh_wdt_open,
.release = sh_wdt_close,
+ .mmap = sh_wdt_mmap,
};
static struct watchdog_info sh_wdt_info = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "SH WDT",
};
@@ -371,7 +413,6 @@ static struct miscdevice sh_wdt_miscdev = {
/**
* sh_wdt_init - Initialize module
- *
* Registers the device and notifier handler. Actual device
* initialization is handled by sh_wdt_open().
*/
@@ -381,15 +422,15 @@ static int __init sh_wdt_init(void)
if ((clock_division_ratio < 0x5) || (clock_division_ratio > 0x7)) {
clock_division_ratio = WTCSR_CKS_4096;
- printk(KERN_INFO PFX "clock_division_ratio value must be 0x5<=x<=0x7, using %d\n",
- clock_division_ratio);
+ printk(KERN_INFO PFX "clock_division_ratio value must "
+ "be 0x5<=x<=0x7, using %d\n", clock_division_ratio);
}
- if (sh_wdt_set_heartbeat(heartbeat))
- {
+ rc = sh_wdt_set_heartbeat(heartbeat);
+ if (unlikely(rc)) {
heartbeat = WATCHDOG_HEARTBEAT;
- printk(KERN_INFO PFX "heartbeat value must be 1<=x<=3600, using %d\n",
- heartbeat);
+ printk(KERN_INFO PFX "heartbeat value must "
+ "be 1<=x<=3600, using %d\n", heartbeat);
}
init_timer(&timer);
@@ -397,15 +438,16 @@ static int __init sh_wdt_init(void)
timer.data = 0;
rc = register_reboot_notifier(&sh_wdt_notifier);
- if (rc) {
- printk(KERN_ERR PFX "Can't register reboot notifier (err=%d)\n", rc);
+ if (unlikely(rc)) {
+ printk(KERN_ERR PFX "Can't register reboot notifier (err=%d)\n",
+ rc);
return rc;
}
rc = misc_register(&sh_wdt_miscdev);
- if (rc) {
- printk(KERN_ERR PFX "Can't register miscdev on minor=%d (err=%d)\n",
- sh_wdt_miscdev.minor, rc);
+ if (unlikely(rc)) {
+ printk(KERN_ERR PFX "Can't register miscdev on "
+ "minor=%d (err=%d)\n", sh_wdt_miscdev.minor, rc);
unregister_reboot_notifier(&sh_wdt_notifier);
return rc;
}
@@ -418,7 +460,6 @@ static int __init sh_wdt_init(void)
/**
* sh_wdt_exit - Deinitialize module
- *
* Unregisters the device and notifier handler. Actual device
* deinitialization is handled by sh_wdt_close().
*/
@@ -434,14 +475,13 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
-MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). Defaults to 0x7.");
+MODULE_PARM_DESC(clock_division_ratio, "Clock division ratio. Valid ranges are from 0x5 (1.31ms) to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<=heartbeat<=3600, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
module_param(nowayout, int, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
module_init(sh_wdt_init);
module_exit(sh_wdt_exit);
-
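
Finally, the sh_wdt_mmap() comment above notes that userspace must do its own page-offset math when CONFIG_SH_WDT_MMAP is enabled, because the mapping starts at the page-aligned boundary below WTCNT rather than at WTCNT itself. A rough sketch of that math from userspace follows; WTCNT_PHYS is a placeholder, since the real address is CPU-subtype specific (see asm/watchdog.h for the target part).

	/* Sketch: reading the watchdog counter through the mmap()ed register
	 * page.  WTCNT_PHYS below is illustrative only.  NB: opening
	 * /dev/watchdog arms the timer; a real client must also service it
	 * and perform the magic close. */
	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	#define WTCNT_PHYS	0xffc00008UL	/* placeholder address */

	int main(void)
	{
		unsigned long pagesize = (unsigned long)sysconf(_SC_PAGESIZE);
		unsigned long page_base = WTCNT_PHYS & ~(pagesize - 1);
		unsigned long offset = WTCNT_PHYS - page_base;
		int fd = open("/dev/watchdog", O_RDWR);

		if (fd < 0)
			return 1;

		/* The driver maps exactly one page starting at the
		 * page-aligned boundary below WTCNT, so the counter sits
		 * at 'offset' within the mapping. */
		volatile uint8_t *regs = mmap(NULL, pagesize,
					      PROT_READ | PROT_WRITE,
					      MAP_SHARED, fd, 0);
		if (regs == MAP_FAILED) {
			close(fd);
			return 1;
		}

		uint8_t count = regs[offset];
		(void)count;

		munmap((void *)regs, pagesize);
		close(fd);
		return 0;
	}
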