author     Paul Mackerras <paulus@samba.org>   2008-01-31 11:25:51 +1100
committer  Paul Mackerras <paulus@samba.org>   2008-01-31 11:25:51 +1100
commit     bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree       5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /drivers/char
parent     4eece4ccf997c0e6d8fdad3d842e37b16b8d705f (diff)
parent     5bdeae46be6dfe9efa44a548bd622af325f4bdb4 (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'drivers/char')
-rw-r--r--   drivers/char/Kconfig                    10
-rw-r--r--   drivers/char/Makefile                    1
-rw-r--r--   drivers/char/agp/ali-agp.c               2
-rw-r--r--   drivers/char/agp/backend.c               3
-rw-r--r--   drivers/char/agp/generic.c               3
-rw-r--r--   drivers/char/agp/i460-agp.c              2
-rw-r--r--   drivers/char/agp/intel-agp.c            22
-rw-r--r--   drivers/char/drm/drm_pciids.h            1
-rw-r--r--   drivers/char/hpet.c                    126
-rw-r--r--   drivers/char/hvc_console.c              80
-rw-r--r--   drivers/char/hvcs.c                     78
-rw-r--r--   drivers/char/hw_random/amd-rng.c        12
-rw-r--r--   drivers/char/hw_random/core.c           24
-rw-r--r--   drivers/char/hw_random/geode-rng.c      12
-rw-r--r--   drivers/char/hw_random/intel-rng.c      15
-rw-r--r--   drivers/char/hw_random/omap-rng.c       13
-rw-r--r--   drivers/char/hw_random/pasemi-rng.c     16
-rw-r--r--   drivers/char/hw_random/via-rng.c        19
-rw-r--r--   drivers/char/nozomi.c                 1993
-rw-r--r--   drivers/char/rtc.c                     253
20 files changed, 2335 insertions, 350 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2e3a0d4bc4c2..466629594776 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -373,6 +373,16 @@ config ISTALLION
To compile this driver as a module, choose M here: the
module will be called istallion.
+config NOZOMI
+ tristate "HSDPA Broadband Wireless Data Card - Globe Trotter"
+ depends on PCI && EXPERIMENTAL
+ help
+ If you have an HSDPA Broadband Wireless Data Card -
+ Globe Trotter PCMCIA card, say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called nozomi.
+
config A2232
tristate "Commodore A2232 serial support (EXPERIMENTAL)"
depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 07304d50e0cb..96fc01eddefe 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SERIAL167) += serial167.o
obj-$(CONFIG_CYCLADES) += cyclades.o
obj-$(CONFIG_STALLION) += stallion.o
obj-$(CONFIG_ISTALLION) += istallion.o
+obj-$(CONFIG_NOZOMI) += nozomi.o
obj-$(CONFIG_DIGIEPCA) += epca.o
obj-$(CONFIG_SPECIALIX) += specialix.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index aa5ddb716ffb..1ffb381130c3 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
void *addr = agp_generic_alloc_page(agp_bridge);
u32 temp;
- global_flush_tlb();
if (!addr)
return NULL;
@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
if (flags & AGP_PAGE_DESTROY_UNMAP) {
global_cache_flush(); /* is this really needed? --hch */
agp_generic_destroy_page(addr, flags);
- global_flush_tlb();
} else
agp_generic_destroy_page(addr, flags);
}
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 832ded20fe70..2720882e66fe 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
return -ENOMEM;
}
- flush_agp_mappings();
bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page =
@@ -191,7 +190,6 @@ err_out:
if (bridge->driver->needs_scratch_page) {
bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
AGP_PAGE_DESTROY_UNMAP);
- flush_agp_mappings();
bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
AGP_PAGE_DESTROY_FREE);
}
@@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
bridge->driver->needs_scratch_page) {
bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
AGP_PAGE_DESTROY_UNMAP);
- flush_agp_mappings();
bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real),
AGP_PAGE_DESTROY_FREE);
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 64b2f6d7059d..1a4674ce0c71 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr)
for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
}
- flush_agp_mappings();
for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
}
@@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
}
new->bridge = bridge;
- flush_agp_mappings();
-
return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e72a83e2bad5..76f581c85a7d 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
page = agp_generic_alloc_page(agp_bridge);
- global_flush_tlb();
} else
/* Returning NULL would cause problems */
/* AK: really dubious code. */
@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
{
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
agp_generic_destroy_page(page, flags);
- global_flush_tlb();
}
}
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index d87961993ccf..189efb6ef970 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -10,6 +10,8 @@
#include <linux/agp_backend.h>
#include "agp.h"
+#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
+#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
@@ -208,13 +210,11 @@ static void *i8xx_alloc_pages(void)
if (page == NULL)
return NULL;
- if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
- change_page_attr(page, 4, PAGE_KERNEL);
- global_flush_tlb();
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
__free_pages(page, 2);
return NULL;
}
- global_flush_tlb();
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page_address(page);
@@ -228,8 +228,7 @@ static void i8xx_destroy_pages(void *addr)
return;
page = virt_to_page(addr);
- change_page_attr(page, 4, PAGE_KERNEL);
- global_flush_tlb();
+ set_pages_wb(page, 4);
put_page(page);
__free_pages(page, 2);
atomic_dec(&agp_bridge->current_memory_agp);
@@ -339,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
switch (pg_count) {
case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
- global_flush_tlb();
break;
case 4:
/* kludge to get 4 physical pages for ARGB cursor */
@@ -402,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
else {
agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
AGP_PAGE_DESTROY_UNMAP);
- global_flush_tlb();
agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
AGP_PAGE_DESTROY_FREE);
}
@@ -526,7 +523,8 @@ static void intel_i830_init_gtt_entries(void)
break;
case I915_GMCH_GMS_STOLEN_48M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
@@ -538,7 +536,8 @@ static void intel_i830_init_gtt_entries(void)
break;
case I915_GMCH_GMS_STOLEN_64M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
@@ -1854,6 +1853,8 @@ static const struct intel_driver_description {
{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865",
&intel_845_driver, &intel_830_driver },
{ PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)",
+ NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G",
NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM",
@@ -2059,6 +2060,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82875_HB),
ID(PCI_DEVICE_ID_INTEL_7505_0),
ID(PCI_DEVICE_ID_INTEL_7205_0),
+ ID(PCI_DEVICE_ID_INTEL_E7221_HB),
ID(PCI_DEVICE_ID_INTEL_82915G_HB),
ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index f3593974496c..43d3c42df360 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -297,6 +297,7 @@
{0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4c16778e3f84..465ad35ed38f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp)
return 0;
}
-EXPORT_SYMBOL(hpet_alloc);
-EXPORT_SYMBOL(hpet_register);
-EXPORT_SYMBOL(hpet_unregister);
-EXPORT_SYMBOL(hpet_control);
-
-int hpet_register(struct hpet_task *tp, int periodic)
-{
- unsigned int i;
- u64 mask;
- struct hpet_timer __iomem *timer;
- struct hpet_dev *devp;
- struct hpets *hpetp;
-
- switch (periodic) {
- case 1:
- mask = Tn_PER_INT_CAP_MASK;
- break;
- case 0:
- mask = 0;
- break;
- default:
- return -EINVAL;
- }
-
- tp->ht_opaque = NULL;
-
- spin_lock_irq(&hpet_task_lock);
- spin_lock(&hpet_lock);
-
- for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
- for (timer = hpetp->hp_hpet->hpet_timers, i = 0;
- i < hpetp->hp_ntimer; i++, timer++) {
- if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK)
- != mask)
- continue;
-
- devp = &hpetp->hp_dev[i];
-
- if (devp->hd_flags & HPET_OPEN || devp->hd_task) {
- devp = NULL;
- continue;
- }
-
- tp->ht_opaque = devp;
- devp->hd_task = tp;
- break;
- }
-
- spin_unlock(&hpet_lock);
- spin_unlock_irq(&hpet_task_lock);
-
- if (tp->ht_opaque)
- return 0;
- else
- return -EBUSY;
-}
-
static inline int hpet_tpcheck(struct hpet_task *tp)
{
struct hpet_dev *devp;
@@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp)
return 0;
}
-int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg)
-{
- struct hpet_dev *devp;
- int err;
-
- if ((err = hpet_tpcheck(tp)))
- return err;
-
- spin_lock_irq(&hpet_lock);
- devp = tp->ht_opaque;
- if (devp->hd_task != tp) {
- spin_unlock_irq(&hpet_lock);
- return -ENXIO;
- }
- spin_unlock_irq(&hpet_lock);
- return hpet_ioctl_common(devp, cmd, arg, 1);
-}
-
static ctl_table hpet_table[] = {
{
.ctl_name = CTL_UNNUMBERED,
@@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp)
int hpet_alloc(struct hpet_data *hdp)
{
- u64 cap, mcfg;
+ u64 cap, mcfg, hpet_config;
struct hpet_dev *devp;
- u32 i, ntimer;
+ u32 i, ntimer, irq;
struct hpets *hpetp;
size_t siz;
struct hpet __iomem *hpet;
static struct hpets *last = NULL;
- unsigned long period;
+ unsigned long period, irq_bitmap;
unsigned long long temp;
/*
@@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp)
hpetp->hp_hpet_phys = hdp->hd_phys_address;
hpetp->hp_ntimer = hdp->hd_nirqs;
+ hpet = hpetp->hp_hpet;
- for (i = 0; i < hdp->hd_nirqs; i++)
- hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+ /* Assign IRQs statically for legacy devices */
+ hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0];
+ hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1];
- hpet = hpetp->hp_hpet;
+ /* Assign IRQs dynamically for the others */
+ for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) {
+ struct hpet_timer __iomem *timer;
+
+ timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
+
+ /* Check if there's already an IRQ assigned to the timer */
+ if (hdp->hd_irq[i]) {
+ hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
+ continue;
+ }
+
+ hpet_config = readq(&timer->hpet_config);
+ irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK)
+ >> Tn_INT_ROUTE_CAP_SHIFT;
+ if (!irq_bitmap)
+ irq = 0; /* No valid IRQ Assignable */
+ else {
+ irq = find_first_bit(&irq_bitmap, 32);
+ do {
+ hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT;
+ writeq(hpet_config, &timer->hpet_config);
+
+ /*
+ * Verify whether we have written a valid
+ * IRQ number by reading it back again
+ */
+ hpet_config = readq(&timer->hpet_config);
+ if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK)
+ >> Tn_INT_ROUTE_CNF_SHIFT)
+ break; /* Success */
+ } while ((irq = (find_next_bit(&irq_bitmap, 32, irq))));
+ }
+ hpetp->hp_dev[i].hd_hdwirq = irq;
+ }
cap = readq(&hpet->hpet_cap);
@@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp)
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
- printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
+ printk("%s %d", i > 0 ? "," : "",
+ hpetp->hp_dev[i].hd_hdwirq);
printk("\n");
printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n",
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 8252f8668538..480fae29c9b2 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -27,7 +27,7 @@
#include <linux/init.h>
#include <linux/kbd_kern.h>
#include <linux/kernel.h>
-#include <linux/kobject.h>
+#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -89,7 +89,7 @@ struct hvc_struct {
int irq_requested;
int irq;
struct list_head next;
- struct kobject kobj; /* ref count & hvc_struct lifetime */
+ struct kref kref; /* ref count & hvc_struct lifetime */
};
/* dynamic list of hvc_struct instances */
@@ -110,7 +110,7 @@ static int last_hvc = -1;
/*
* Do not call this function with either the hvc_structs_lock or the hvc_struct
- * lock held. If successful, this function increments the kobject reference
+ * lock held. If successful, this function increments the kref reference
* count against the target hvc_struct so it should be released when finished.
*/
static struct hvc_struct *hvc_get_by_index(int index)
@@ -123,7 +123,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
list_for_each_entry(hp, &hvc_structs, next) {
spin_lock_irqsave(&hp->lock, flags);
if (hp->index == index) {
- kobject_get(&hp->kobj);
+ kref_get(&hp->kref);
spin_unlock_irqrestore(&hp->lock, flags);
spin_unlock(&hvc_structs_lock);
return hp;
@@ -242,6 +242,23 @@ static int __init hvc_console_init(void)
}
console_initcall(hvc_console_init);
+/* callback when the kref ref count reaches zero. */
+static void destroy_hvc_struct(struct kref *kref)
+{
+ struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
+ unsigned long flags;
+
+ spin_lock(&hvc_structs_lock);
+
+ spin_lock_irqsave(&hp->lock, flags);
+ list_del(&(hp->next));
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ spin_unlock(&hvc_structs_lock);
+
+ kfree(hp);
+}
+
/*
* hvc_instantiate() is an early console discovery method which locates
* consoles * prior to the vio subsystem discovering them. Hotplugged
@@ -261,7 +278,7 @@ int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
/* make sure no no tty has been registered in this index */
hp = hvc_get_by_index(index);
if (hp) {
- kobject_put(&hp->kobj);
+ kref_put(&hp->kref, destroy_hvc_struct);
return -1;
}
@@ -318,9 +335,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
unsigned long flags;
int irq = 0;
int rc = 0;
- struct kobject *kobjp;
- /* Auto increments kobject reference if found. */
+ /* Auto increments kref reference if found. */
if (!(hp = hvc_get_by_index(tty->index)))
return -ENODEV;
@@ -341,8 +357,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
if (irq)
hp->irq_requested = 1;
- kobjp = &hp->kobj;
-
spin_unlock_irqrestore(&hp->lock, flags);
/* check error, fallback to non-irq */
if (irq)
@@ -352,7 +366,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
* If the request_irq() fails and we return an error. The tty layer
* will call hvc_close() after a failed open but we don't want to clean
* up there so we'll clean up here and clear out the previously set
- * tty fields and return the kobject reference.
+ * tty fields and return the kref reference.
*/
if (rc) {
spin_lock_irqsave(&hp->lock, flags);
@@ -360,7 +374,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
hp->irq_requested = 0;
spin_unlock_irqrestore(&hp->lock, flags);
tty->driver_data = NULL;
- kobject_put(kobjp);
+ kref_put(&hp->kref, destroy_hvc_struct);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
}
/* Force wakeup of the polling thread */
@@ -372,7 +386,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
struct hvc_struct *hp;
- struct kobject *kobjp;
int irq = 0;
unsigned long flags;
@@ -382,7 +395,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
/*
* No driver_data means that this close was issued after a failed
* hvc_open by the tty layer's release_dev() function and we can just
- * exit cleanly because the kobject reference wasn't made.
+ * exit cleanly because the kref reference wasn't made.
*/
if (!tty->driver_data)
return;
@@ -390,7 +403,6 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
hp = tty->driver_data;
spin_lock_irqsave(&hp->lock, flags);
- kobjp = &hp->kobj;
if (--hp->count == 0) {
if (hp->irq_requested)
irq = hp->irq;
@@ -417,7 +429,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
spin_unlock_irqrestore(&hp->lock, flags);
}
- kobject_put(kobjp);
+ kref_put(&hp->kref, destroy_hvc_struct);
}
static void hvc_hangup(struct tty_struct *tty)
@@ -426,7 +438,6 @@ static void hvc_hangup(struct tty_struct *tty)
unsigned long flags;
int irq = 0;
int temp_open_count;
- struct kobject *kobjp;
if (!hp)
return;
@@ -443,7 +454,6 @@ static void hvc_hangup(struct tty_struct *tty)
return;
}
- kobjp = &hp->kobj;
temp_open_count = hp->count;
hp->count = 0;
hp->n_outbuf = 0;
@@ -457,7 +467,7 @@ static void hvc_hangup(struct tty_struct *tty)
free_irq(irq, hp);
while(temp_open_count) {
--temp_open_count;
- kobject_put(kobjp);
+ kref_put(&hp->kref, destroy_hvc_struct);
}
}
@@ -729,27 +739,6 @@ static const struct tty_operations hvc_ops = {
.chars_in_buffer = hvc_chars_in_buffer,
};
-/* callback when the kboject ref count reaches zero. */
-static void destroy_hvc_struct(struct kobject *kobj)
-{
- struct hvc_struct *hp = container_of(kobj, struct hvc_struct, kobj);
- unsigned long flags;
-
- spin_lock(&hvc_structs_lock);
-
- spin_lock_irqsave(&hp->lock, flags);
- list_del(&(hp->next));
- spin_unlock_irqrestore(&hp->lock, flags);
-
- spin_unlock(&hvc_structs_lock);
-
- kfree(hp);
-}
-
-static struct kobj_type hvc_kobj_type = {
- .release = destroy_hvc_struct,
-};
-
struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
struct hv_ops *ops, int outbuf_size)
{
@@ -776,8 +765,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
hp->outbuf_size = outbuf_size;
hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
- kobject_init(&hp->kobj);
- hp->kobj.ktype = &hvc_kobj_type;
+ kref_init(&hp->kref);
spin_lock_init(&hp->lock);
spin_lock(&hvc_structs_lock);
@@ -806,12 +794,10 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
int __devexit hvc_remove(struct hvc_struct *hp)
{
unsigned long flags;
- struct kobject *kobjp;
struct tty_struct *tty;
spin_lock_irqsave(&hp->lock, flags);
tty = hp->tty;
- kobjp = &hp->kobj;
if (hp->index < MAX_NR_HVC_CONSOLES)
vtermnos[hp->index] = -1;
@@ -821,12 +807,12 @@ int __devexit hvc_remove(struct hvc_struct *hp)
spin_unlock_irqrestore(&hp->lock, flags);
/*
- * We 'put' the instance that was grabbed when the kobject instance
- * was initialized using kobject_init(). Let the last holder of this
- * kobject cause it to be removed, which will probably be the tty_hangup
+ * We 'put' the instance that was grabbed when the kref instance
+ * was initialized using kref_init(). Let the last holder of this
+ * kref cause it to be removed, which will probably be the tty_hangup
* below.
*/
- kobject_put(kobjp);
+ kref_put(&hp->kref, destroy_hvc_struct);
/*
* This function call will auto chain call hvc_hangup. The tty should
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 69d8866de783..fd7559084b82 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -57,11 +57,7 @@
* rescanning partner information upon a user's request.
*
* Each vty-server, prior to being exposed to this driver is reference counted
- * using the 2.6 Linux kernel kobject construct. This kobject is also used by
- * the vio bus to provide a vio device sysfs entry that this driver attaches
- * device specific attributes to, including partner information. The vio bus
- * framework also provides a sysfs entry for each vio driver. The hvcs driver
- * provides driver attributes in this entry.
+ * using the 2.6 Linux kernel kref construct.
*
* For direction on installation and usage of this driver please reference
* Documentation/powerpc/hvcs.txt.
@@ -71,7 +67,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/kobject.h>
+#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/major.h>
@@ -293,12 +289,12 @@ struct hvcs_struct {
int chars_in_buffer;
/*
- * Any variable below the kobject is valid before a tty is connected and
+ * Any variable below the kref is valid before a tty is connected and
* stays valid after the tty is disconnected. These shouldn't be
* whacked until the koject refcount reaches zero though some entries
* may be changed via sysfs initiatives.
*/
- struct kobject kobj; /* ref count & hvcs_struct lifetime */
+ struct kref kref; /* ref count & hvcs_struct lifetime */
int connected; /* is the vty-server currently connected to a vty? */
uint32_t p_unit_address; /* partner unit address */
uint32_t p_partition_ID; /* partner partition ID */
@@ -307,8 +303,8 @@ struct hvcs_struct {
struct vio_dev *vdev;
};
-/* Required to back map a kobject to its containing object */
-#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj)
+/* Required to back map a kref to its containing object */
+#define from_kref(k) container_of(k, struct hvcs_struct, kref)
static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
@@ -334,7 +330,6 @@ static void hvcs_partner_free(struct hvcs_struct *hvcsd);
static int hvcs_enable_device(struct hvcs_struct *hvcsd,
uint32_t unit_address, unsigned int irq, struct vio_dev *dev);
-static void destroy_hvcs_struct(struct kobject *kobj);
static int hvcs_open(struct tty_struct *tty, struct file *filp);
static void hvcs_close(struct tty_struct *tty, struct file *filp);
static void hvcs_hangup(struct tty_struct * tty);
@@ -703,10 +698,10 @@ static void hvcs_return_index(int index)
hvcs_index_list[index] = -1;
}
-/* callback when the kboject ref count reaches zero */
-static void destroy_hvcs_struct(struct kobject *kobj)
+/* callback when the kref ref count reaches zero */
+static void destroy_hvcs_struct(struct kref *kref)
{
- struct hvcs_struct *hvcsd = from_kobj(kobj);
+ struct hvcs_struct *hvcsd = from_kref(kref);
struct vio_dev *vdev;
unsigned long flags;
@@ -743,10 +738,6 @@ static void destroy_hvcs_struct(struct kobject *kobj)
kfree(hvcsd);
}
-static struct kobj_type hvcs_kobj_type = {
- .release = destroy_hvcs_struct,
-};
-
static int hvcs_get_index(void)
{
int i;
@@ -791,9 +782,7 @@ static int __devinit hvcs_probe(
spin_lock_init(&hvcsd->lock);
/* Automatically incs the refcount the first time */
- kobject_init(&hvcsd->kobj);
- /* Set up the callback for terminating the hvcs_struct's life */
- hvcsd->kobj.ktype = &hvcs_kobj_type;
+ kref_init(&hvcsd->kref);
hvcsd->vdev = dev;
dev->dev.driver_data = hvcsd;
@@ -844,7 +833,6 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
{
struct hvcs_struct *hvcsd = dev->dev.driver_data;
unsigned long flags;
- struct kobject *kobjp;
struct tty_struct *tty;
if (!hvcsd)
@@ -856,15 +844,13 @@ static int __devexit hvcs_remove(struct vio_dev *dev)
tty = hvcsd->tty;
- kobjp = &hvcsd->kobj;
-
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
* Let the last holder of this object cause it to be removed, which
* would probably be tty_hangup below.
*/
- kobject_put (kobjp);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
/*
* The hangup is a scheduled function which will auto chain call
@@ -1086,7 +1072,7 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
}
/*
- * This always increments the kobject ref count if the call is successful.
+ * This always increments the kref ref count if the call is successful.
* Please remember to dec when you are done with the instance.
*
* NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when
@@ -1103,7 +1089,7 @@ static struct hvcs_struct *hvcs_get_by_index(int index)
list_for_each_entry(hvcsd, &hvcs_structs, next) {
spin_lock_irqsave(&hvcsd->lock, flags);
if (hvcsd->index == index) {
- kobject_get(&hvcsd->kobj);
+ kref_get(&hvcsd->kref);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
return hvcsd;
@@ -1129,14 +1115,13 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
unsigned int irq;
struct vio_dev *vdev;
unsigned long unit_address;
- struct kobject *kobjp;
if (tty->driver_data)
goto fast_open;
/*
* Is there a vty-server that shares the same index?
- * This function increments the kobject index.
+ * This function increments the kref index.
*/
if (!(hvcsd = hvcs_get_by_index(tty->index))) {
printk(KERN_WARNING "HVCS: open failed, no device associated"
@@ -1181,7 +1166,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
* and will grab the spinlock and free the connection if it fails.
*/
if (((rc = hvcs_enable_device(hvcsd, unit_address, irq, vdev)))) {
- kobject_put(&hvcsd->kobj);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
printk(KERN_WARNING "HVCS: enable device failed.\n");
return rc;
}
@@ -1192,17 +1177,11 @@ fast_open:
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
- if (!kobject_get(&hvcsd->kobj)) {
- spin_unlock_irqrestore(&hvcsd->lock, flags);
- printk(KERN_ERR "HVCS: Kobject of open"
- " hvcs doesn't exist.\n");
- return -EFAULT; /* Is this the right return value? */
- }
-
+ kref_get(&hvcsd->kref);
hvcsd->open_count++;
-
hvcsd->todo_mask |= HVCS_SCHED_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
+
open_success:
hvcs_kick();
@@ -1212,9 +1191,8 @@ open_success:
return 0;
error_release:
- kobjp = &hvcsd->kobj;
spin_unlock_irqrestore(&hvcsd->lock, flags);
- kobject_put(&hvcsd->kobj);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
printk(KERN_WARNING "HVCS: partner connect failed.\n");
return retval;
@@ -1224,7 +1202,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
{
struct hvcs_struct *hvcsd;
unsigned long flags;
- struct kobject *kobjp;
int irq = NO_IRQ;
/*
@@ -1245,7 +1222,6 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
- kobjp = &hvcsd->kobj;
if (--hvcsd->open_count == 0) {
vio_disable_interrupts(hvcsd->vdev);
@@ -1270,7 +1246,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
tty->driver_data = NULL;
free_irq(irq, hvcsd);
- kobject_put(kobjp);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
return;
} else if (hvcsd->open_count < 0) {
printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
@@ -1279,7 +1255,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
}
spin_unlock_irqrestore(&hvcsd->lock, flags);
- kobject_put(kobjp);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
}
static void hvcs_hangup(struct tty_struct * tty)
@@ -1287,21 +1263,17 @@ static void hvcs_hangup(struct tty_struct * tty)
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
int temp_open_count;
- struct kobject *kobjp;
int irq = NO_IRQ;
spin_lock_irqsave(&hvcsd->lock, flags);
- /* Preserve this so that we know how many kobject refs to put */
+ /* Preserve this so that we know how many kref refs to put */
temp_open_count = hvcsd->open_count;
/*
- * Don't kobject put inside the spinlock because the destruction
+ * Don't kref put inside the spinlock because the destruction
* callback may use the spinlock and it may get called before the
- * spinlock has been released. Get a pointer to the kobject and
- * kobject_put on that after releasing the spinlock.
+ * spinlock has been released.
*/
- kobjp = &hvcsd->kobj;
-
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask = 0;
@@ -1324,7 +1296,7 @@ static void hvcs_hangup(struct tty_struct * tty)
free_irq(irq, hvcsd);
/*
- * We need to kobject_put() for every open_count we have since the
+ * We need to kref_put() for every open_count we have since the
* tty_hangup() function doesn't invoke a close per open connection on a
* non-console device.
*/
@@ -1335,7 +1307,7 @@ static void hvcs_hangup(struct tty_struct * tty)
* NOTE: If this hangup was signaled from user space then the
* final put will never happen.
*/
- kobject_put(kobjp);
+ kref_put(&hvcsd->kref, destroy_hvcs_struct);
}
}
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 556fd81fa815..c422e870dc52 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/hw_random.h>
+#include <linux/delay.h>
#include <asm/io.h>
@@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct pci_dev *amd_pdev;
-static int amd_rng_data_present(struct hwrng *rng)
+static int amd_rng_data_present(struct hwrng *rng, int wait)
{
u32 pmbase = (u32)rng->priv;
+ int data, i;
- return !!(inl(pmbase + 0xF4) & 1);
+ for (i = 0; i < 20; i++) {
+ data = !!(inl(pmbase + 0xF4) & 1);
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
}
static int amd_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 26a860adcb38..0118b9817a95 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng)
rng->cleanup(rng);
}
-static inline int hwrng_data_present(struct hwrng *rng)
+static inline int hwrng_data_present(struct hwrng *rng, int wait)
{
if (!rng->data_present)
return 1;
- return rng->data_present(rng);
+ return rng->data_present(rng, wait);
}
static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
@@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
{
u32 data;
ssize_t ret = 0;
- int i, err = 0;
- int data_present;
+ int err = 0;
int bytes_read;
while (size) {
@@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
err = -ENODEV;
goto out;
}
- if (filp->f_flags & O_NONBLOCK) {
- data_present = hwrng_data_present(current_rng);
- } else {
- /* Some RNG require some time between data_reads to gather
- * new entropy. Poll it.
- */
- for (i = 0; i < 20; i++) {
- data_present = hwrng_data_present(current_rng);
- if (data_present)
- break;
- udelay(10);
- }
- }
+
bytes_read = 0;
- if (data_present)
+ if (hwrng_data_present(current_rng,
+ !(filp->f_flags & O_NONBLOCK)))
bytes_read = hwrng_data_read(current_rng, &data);
mutex_unlock(&rng_mutex);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 8e8658dcd2e3..fed4ef5569f5 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/hw_random.h>
+#include <linux/delay.h>
#include <asm/io.h>
@@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
return 4;
}
-static int geode_rng_data_present(struct hwrng *rng)
+static int geode_rng_data_present(struct hwrng *rng, int wait)
{
void __iomem *mem = (void __iomem *)rng->priv;
+ int data, i;
- return !!(readl(mem + GEODE_RNG_STATUS_REG));
+ for (i = 0; i < 20; i++) {
+ data = !!(readl(mem + GEODE_RNG_STATUS_REG));
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
}
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 753f46052b87..5cc651ef75eb 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
+#include <linux/delay.h>
#include <asm/io.h>
@@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem,
return hwstatus_get(mem);
}
-static int intel_rng_data_present(struct hwrng *rng)
+static int intel_rng_data_present(struct hwrng *rng, int wait)
{
void __iomem *mem = (void __iomem *)rng->priv;
-
- return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = !!(readb(mem + INTEL_RNG_STATUS) &
+ INTEL_RNG_DATA_PRESENT);
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
}
static int intel_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3f35a1c562b1..7e319951fa41 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -29,6 +29,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/delay.h>
#include <asm/io.h>
@@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val)
}
/* REVISIT: Does the status bit really work on 16xx? */
-static int omap_rng_data_present(struct hwrng *rng)
+static int omap_rng_data_present(struct hwrng *rng, int wait)
{
- return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
}
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 24ae3073991f..6d50e9bc700b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/delay.h>
#include <asm/of_platform.h>
#include <asm/io.h>
@@ -41,12 +42,19 @@
#define MODULE_NAME "pasemi_rng"
-static int pasemi_rng_data_present(struct hwrng *rng)
+static int pasemi_rng_data_present(struct hwrng *rng, int wait)
{
void __iomem *rng_regs = (void __iomem *)rng->priv;
-
- return (in_le32(rng_regs + SDCRNG_CTL_REG)
- & SDCRNG_CTL_FVLD_M) ? 1 : 0;
+ int data, i;
+
+ for (i = 0; i < 20; i++) {
+ data = (in_le32(rng_regs + SDCRNG_CTL_REG)
+ & SDCRNG_CTL_FVLD_M) ? 1 : 0;
+ if (data || !wait)
+ break;
+ udelay(10);
+ }
+ return data;
}
static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index ec435cb25c4f..868e39fd42e4 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/hw_random.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
@@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
return eax_out;
}
-static int via_rng_data_present(struct hwrng *rng)
+static int via_rng_data_present(struct hwrng *rng, int wait)
{
u32 bytes_out;
u32 *via_rng_datum = (u32 *)(&rng->priv);
+ int i;
/* We choose the recommended 1-byte-per-instruction RNG rate,
* for greater randomness at the expense of speed. Larger
@@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng)
* completes.
*/
- *via_rng_datum = 0; /* paranoia, not really necessary */
- bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
- bytes_out &= VIA_XSTORE_CNT_MASK;
- if (bytes_out == 0)
- return 0;
- return 1;
+ for (i = 0; i < 20; i++) {
+ *via_rng_datum = 0; /* paranoia, not really necessary */
+ bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
+ bytes_out &= VIA_XSTORE_CNT_MASK;
+ if (bytes_out || !wait)
+ break;
+ udelay(10);
+ }
+ return bytes_out ? 1 : 0;
}
static int via_rng_data_read(struct hwrng *rng, u32 *data)
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
new file mode 100644
index 000000000000..6076e662886a
--- /dev/null
+++ b/drivers/char/nozomi.c
@@ -0,0 +1,1993 @@
+/*
+ * nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
+ *
+ * Written by: Ulf Jakobsson,
+ * Jan Åkerfeldt,
+ * Stefan Thomasson,
+ *
+ * Maintained by: Paul Hardwick (p.hardwick@option.com)
+ *
+ * Patches:
+ * Locking code changes for Vodafone by Sphere Systems Ltd,
+ * Andrew Bird (ajb@spheresystems.co.uk )
+ * & Phil Sanderson
+ *
+ * Source has been ported from an implementation made by Filip Aben @ Option
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Copyright (c) 2005,2006 Option Wireless Sweden AB
+ * Copyright (c) 2006 Sphere Systems Ltd
+ * Copyright (c) 2006 Option Wireless n/v
+ * All rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * --------------------------------------------------------------------------
+ */
+
+/*
+ * CHANGELOG
+ * Version 2.1d
+ * 11-November-2007 Jiri Slaby, Frank Seidel
+ * - Big rework of multicard support by Jiri
+ * - Major cleanups (semaphore to mutex, endianness, no major reservation)
+ * - Optimizations
+ *
+ * Version 2.1c
+ * 30-October-2007 Frank Seidel
+ * - Completed multicard support
+ * - Minor cleanups
+ *
+ * Version 2.1b
+ * 07-August-2007 Frank Seidel
+ * - Minor cleanups
+ * - theoretical multicard support
+ *
+ * Version 2.1
+ * 03-July-2006 Paul Hardwick
+ *
+ * - Stability Improvements. Incorporated spinlock wraps patch.
+ * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
+ * - using __devexit macro for tty
+ *
+ *
+ * Version 2.0
+ * 08-feb-2006 15:34:10:Ulf
+ *
+ * -Fixed issue when not waking up line discipline layer, could probably result
+ * in better uplink performance for 2.4.
+ *
+ * -Fixed issue with big endian during initialization, now proper toggle flags
+ * are handled between preloader and maincode.
+ *
+ * -Fixed flow control issue.
+ *
+ * -Added support for setting DTR.
+ *
+ * -For 2.4 kernels, removing temporary buffer that's not needed.
+ *
+ * -Reading CTS only for modem port (only port that supports it).
+ *
+ * -Return 0 in write_room instead of negative value, it's not handled in
+ * upper layer.
+ *
+ * --------------------------------------------------------------------------
+ * Version 1.0
+ *
+ * First version of the driver, only tested with cards of type F32_2.
+ * Works fine with 2.4 and 2.6 kernels.
+ * The driver also supports big endian architectures.
+ */
+
+/* Enable this to have a lot of debug printouts */
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+
+#include <linux/delay.h>
+
+
+#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \
+ __DATE__ " " __TIME__ ")"
+
+/* Macros definitions */
+
+/* Default debug printout level */
+#define NOZOMI_DEBUG_LEVEL 0x00
+
+#define P_BUF_SIZE 128
+#define NFO(_err_flag_, args...) \
+do { \
+ char tmp[P_BUF_SIZE]; \
+ snprintf(tmp, sizeof(tmp), ##args); \
+ printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \
+ __FUNCTION__, tmp); \
+} while (0)
+
+#define DBG1(args...) D_(0x01, ##args)
+#define DBG2(args...) D_(0x02, ##args)
+#define DBG3(args...) D_(0x04, ##args)
+#define DBG4(args...) D_(0x08, ##args)
+#define DBG5(args...) D_(0x10, ##args)
+#define DBG6(args...) D_(0x20, ##args)
+#define DBG7(args...) D_(0x40, ##args)
+#define DBG8(args...) D_(0x80, ##args)
+
+#ifdef DEBUG
+/* Do we need this settable at runtime? */
+static int debug = NOZOMI_DEBUG_LEVEL;
+
+#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
+ while (0)
+#define D_(lvl, args...) D(lvl, ##args)
+
+/* These printouts are always printed */
+
+#else
+static int debug;
+#define D_(lvl, args...)
+#endif
+
+/* TODO: rewrite to optimize macros... */
+
+#define TMP_BUF_MAX 256
+
+#define DUMP(buf__,len__) \
+ do { \
+ char tbuf[TMP_BUF_MAX] = {0};\
+ if (len__ > 1) {\
+ snprintf(tbuf, len__ > TMP_BUF_MAX ? TMP_BUF_MAX : len__, "%s", buf__);\
+ if (tbuf[len__-2] == '\r') {\
+ tbuf[len__-2] = 'r';\
+ } \
+ DBG1("SENDING: '%s' (%d+n)", tbuf, len__);\
+ } else {\
+ DBG1("SENDING: '%s' (%d)", tbuf, len__);\
+ } \
+} while (0)
+
+/* Defines */
+#define NOZOMI_NAME "nozomi"
+#define NOZOMI_NAME_TTY "nozomi_tty"
+#define DRIVER_DESC "Nozomi driver"
+
+#define NTTY_TTY_MAXMINORS 256
+#define NTTY_FIFO_BUFFER_SIZE 8192
+
+/* Must be power of 2 */
+#define FIFO_BUFFER_SIZE_UL 8192
+
+/* Size of tmp send buffer to card */
+#define SEND_BUF_MAX 1024
+#define RECEIVE_BUF_MAX 4
+
+
+/* Define all types of vendors and devices to support */
+#define VENDOR1 0x1931 /* Vendor Option */
+#define DEVICE1 0x000c /* HSDPA card */
+
+#define R_IIR 0x0000 /* Interrupt Identity Register */
+#define R_FCR 0x0000 /* Flow Control Register */
+#define R_IER 0x0004 /* Interrupt Enable Register */
+
+#define CONFIG_MAGIC 0xEFEFFEFE
+#define TOGGLE_VALID 0x0000
+
+/* Definition of interrupt tokens */
+#define MDM_DL1 0x0001
+#define MDM_UL1 0x0002
+#define MDM_DL2 0x0004
+#define MDM_UL2 0x0008
+#define DIAG_DL1 0x0010
+#define DIAG_DL2 0x0020
+#define DIAG_UL 0x0040
+#define APP1_DL 0x0080
+#define APP1_UL 0x0100
+#define APP2_DL 0x0200
+#define APP2_UL 0x0400
+#define CTRL_DL 0x0800
+#define CTRL_UL 0x1000
+#define RESET 0x8000
+
+#define MDM_DL (MDM_DL1 | MDM_DL2)
+#define MDM_UL (MDM_UL1 | MDM_UL2)
+#define DIAG_DL (DIAG_DL1 | DIAG_DL2)
+
+/* modem signal definition */
+#define CTRL_DSR 0x0001
+#define CTRL_DCD 0x0002
+#define CTRL_RI 0x0004
+#define CTRL_CTS 0x0008
+
+#define CTRL_DTR 0x0001
+#define CTRL_RTS 0x0002
+
+#define MAX_PORT 4
+#define NOZOMI_MAX_PORTS 5
+#define NOZOMI_MAX_CARDS (NTTY_TTY_MAXMINORS / MAX_PORT)
+
+/* Type definitions */
+
+/*
+ * There are two types of nozomi cards,
+ * one with 2048 bytes of memory and one with 8192 bytes of memory
+ */
+enum card_type {
+ F32_2 = 2048, /* 512 bytes downlink + uplink * 2 -> 2048 */
+ F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
+};
+
+/* Two different toggle channels exist */
+enum channel_type {
+ CH_A = 0,
+ CH_B = 1,
+};
+
+/* Port definition for the card regarding flow control */
+enum ctrl_port_type {
+ CTRL_CMD = 0,
+ CTRL_MDM = 1,
+ CTRL_DIAG = 2,
+ CTRL_APP1 = 3,
+ CTRL_APP2 = 4,
+ CTRL_ERROR = -1,
+};
+
+/* Ports that the nozomi has */
+enum port_type {
+ PORT_MDM = 0,
+ PORT_DIAG = 1,
+ PORT_APP1 = 2,
+ PORT_APP2 = 3,
+ PORT_CTRL = 4,
+ PORT_ERROR = -1,
+};
+
+#ifdef __BIG_ENDIAN
+/* Big endian */
+
+struct toggles {
+ unsigned enabled:5; /*
+ * Toggle fields are valid if enabled is 0,
+ * else A-channels must always be used.
+ */
+ unsigned diag_dl:1;
+ unsigned mdm_dl:1;
+ unsigned mdm_ul:1;
+} __attribute__ ((packed));
+
+/* Configuration table to read at startup of card */
+/* Is for now only needed during initialization phase */
+struct config_table {
+ u32 signature;
+ u16 product_information;
+ u16 version;
+ u8 pad3[3];
+ struct toggles toggle;
+ u8 pad1[4];
+ u16 dl_mdm_len1; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_start;
+
+ u16 dl_diag_len1;
+ u16 dl_mdm_len2; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_app1_len;
+
+ u16 dl_diag_len2;
+ u16 dl_ctrl_len;
+ u16 dl_app2_len;
+ u8 pad2[16];
+ u16 ul_mdm_len1;
+ u16 ul_start;
+ u16 ul_diag_len;
+ u16 ul_mdm_len2;
+ u16 ul_app1_len;
+ u16 ul_app2_len;
+ u16 ul_ctrl_len;
+} __attribute__ ((packed));
+
+/* This stores all control downlink flags */
+struct ctrl_dl {
+ u8 port;
+ unsigned reserved:4;
+ unsigned CTS:1;
+ unsigned RI:1;
+ unsigned DCD:1;
+ unsigned DSR:1;
+} __attribute__ ((packed));
+
+/* This stores all control uplink flags */
+struct ctrl_ul {
+ u8 port;
+ unsigned reserved:6;
+ unsigned RTS:1;
+ unsigned DTR:1;
+} __attribute__ ((packed));
+
+#else
+/* Little endian */
+
+/* This represents the toggle information */
+struct toggles {
+ unsigned mdm_ul:1;
+ unsigned mdm_dl:1;
+ unsigned diag_dl:1;
+ unsigned enabled:5; /*
+ * Toggle fields are valid if enabled is 0,
+ * else A-channels must always be used.
+ */
+} __attribute__ ((packed));
+
+/* Configuration table to read at startup of card */
+struct config_table {
+ u32 signature;
+ u16 version;
+ u16 product_information;
+ struct toggles toggle;
+ u8 pad1[7];
+ u16 dl_start;
+ u16 dl_mdm_len1; /*
+ * If this is 64, it can hold
+ * 60 bytes + 4 that is length field
+ */
+ u16 dl_mdm_len2;
+ u16 dl_diag_len1;
+ u16 dl_diag_len2;
+ u16 dl_app1_len;
+ u16 dl_app2_len;
+ u16 dl_ctrl_len;
+ u8 pad2[16];
+ u16 ul_start;
+ u16 ul_mdm_len2;
+ u16 ul_mdm_len1;
+ u16 ul_diag_len;
+ u16 ul_app1_len;
+ u16 ul_app2_len;
+ u16 ul_ctrl_len;
+} __attribute__ ((packed));
+
+/* This stores all control downlink flags */
+struct ctrl_dl {
+ unsigned DSR:1;
+ unsigned DCD:1;
+ unsigned RI:1;
+ unsigned CTS:1;
+ unsigned reserverd:4;
+ u8 port;
+} __attribute__ ((packed));
+
+/* This stores all control uplink flags */
+struct ctrl_ul {
+ unsigned DTR:1;
+ unsigned RTS:1;
+ unsigned reserved:6;
+ u8 port;
+} __attribute__ ((packed));
+#endif
+
+/* This holds all information that is needed regarding a port */
+struct port {
+ u8 update_flow_control;
+ struct ctrl_ul ctrl_ul;
+ struct ctrl_dl ctrl_dl;
+ struct kfifo *fifo_ul;
+ void __iomem *dl_addr[2];
+ u32 dl_size[2];
+ u8 toggle_dl;
+ void __iomem *ul_addr[2];
+ u32 ul_size[2];
+ u8 toggle_ul;
+ u16 token_dl;
+
+ struct tty_struct *tty;
+ int tty_open_count;
+ /* mutex to ensure one access path to this port */
+ struct mutex tty_sem;
+ wait_queue_head_t tty_wait;
+ struct async_icount tty_icount;
+};
+
+/* Private data one for each card in the system */
+struct nozomi {
+ void __iomem *base_addr;
+ unsigned long flip;
+
+ /* Pointers to registers */
+ void __iomem *reg_iir;
+ void __iomem *reg_fcr;
+ void __iomem *reg_ier;
+
+ u16 last_ier;
+ enum card_type card_type;
+ struct config_table config_table; /* Configuration table */
+ struct pci_dev *pdev;
+ struct port port[NOZOMI_MAX_PORTS];
+ u8 *send_buf;
+
+ spinlock_t spin_mutex; /* secures access to registers and tty */
+
+ unsigned int index_start;
+ u32 open_ttys;
+};
+
+/* This is a data packet that is read or written to/from card */
+struct buffer {
+ u32 size; /* size is the length of the data buffer */
+ u8 *data;
+} __attribute__ ((packed));
+
+/* Global variables */
+static struct pci_device_id nozomi_pci_tbl[] = {
+ {PCI_DEVICE(VENDOR1, DEVICE1)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
+
+static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
+static struct tty_driver *ntty_driver;
+
+/*
+ * find card by tty_index
+ */
+static inline struct nozomi *get_dc_by_tty(const struct tty_struct *tty)
+{
+ return tty ? ndevs[tty->index / MAX_PORT] : NULL;
+}
+
+static inline struct port *get_port_by_tty(const struct tty_struct *tty)
+{
+ struct nozomi *ndev = get_dc_by_tty(tty);
+ return ndev ? &ndev->port[tty->index % MAX_PORT] : NULL;
+}
+
+/*
+ * TODO:
+ * -Optimize
+ * -Rewrite cleaner
+ */
+
+static void read_mem32(u32 *buf, const void __iomem *mem_addr_start,
+ u32 size_bytes)
+{
+ u32 i = 0;
+ const u32 *ptr = (__force u32 *) mem_addr_start;
+ u16 *buf16;
+
+ if (unlikely(!ptr || !buf))
+ goto out;
+
+ /* shortcut for extremely often used cases */
+ switch (size_bytes) {
+ case 2: /* 2 bytes */
+ buf16 = (u16 *) buf;
+ *buf16 = __le16_to_cpu(readw((void __iomem *)ptr));
+ goto out;
+ break;
+ case 4: /* 4 bytes */
+ *(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
+ goto out;
+ break;
+ }
+
+ while (i < size_bytes) {
+ if (size_bytes - i == 2) {
+ /* Handle 2 bytes in the end */
+ buf16 = (u16 *) buf;
+ *(buf16) = __le16_to_cpu(readw((void __iomem *)ptr));
+ i += 2;
+ } else {
+ /* Read 4 bytes */
+ *(buf) = __le32_to_cpu(readl((void __iomem *)ptr));
+ i += 4;
+ }
+ buf++;
+ ptr++;
+ }
+out:
+ return;
+}
+
+/*
+ * TODO:
+ * -Optimize
+ * -Rewrite cleaner
+ */
+static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
+ u32 size_bytes)
+{
+ u32 i = 0;
+ u32 *ptr = (__force u32 *) mem_addr_start;
+ u16 *buf16;
+
+ if (unlikely(!ptr || !buf))
+ return 0;
+
+ /* shortcut for extremely often used cases */
+ switch (size_bytes) {
+ case 2: /* 2 bytes */
+ buf16 = (u16 *) buf;
+ writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
+ return 2;
+ break;
+ case 1: /*
+ * also needs to write 4 bytes in this case
+ * so falling through..
+ */
+ case 4: /* 4 bytes */
+ writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
+ return 4;
+ break;
+ }
+
+ while (i < size_bytes) {
+ if (size_bytes - i == 2) {
+ /* 2 bytes */
+ buf16 = (u16 *) buf;
+ writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
+ i += 2;
+ } else {
+ /* 4 bytes */
+ writel(__cpu_to_le32(*buf), (void __iomem *)ptr);
+ i += 4;
+ }
+ buf++;
+ ptr++;
+ }
+ return i;
+}
+
+/* Setup pointers to different channels and also setup buffer sizes. */
+static void setup_memory(struct nozomi *dc)
+{
+ void __iomem *offset = dc->base_addr + dc->config_table.dl_start;
+ /* The length reported includes the 4-byte length field,
+ * hence subtract 4.
+ */
+ const u16 buff_offset = 4;
+
+ /* Modem port dl configuration */
+ dc->port[PORT_MDM].dl_addr[CH_A] = offset;
+ dc->port[PORT_MDM].dl_addr[CH_B] =
+ (offset += dc->config_table.dl_mdm_len1);
+ dc->port[PORT_MDM].dl_size[CH_A] =
+ dc->config_table.dl_mdm_len1 - buff_offset;
+ dc->port[PORT_MDM].dl_size[CH_B] =
+ dc->config_table.dl_mdm_len2 - buff_offset;
+
+ /* Diag port dl configuration */
+ dc->port[PORT_DIAG].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_mdm_len2);
+ dc->port[PORT_DIAG].dl_size[CH_A] =
+ dc->config_table.dl_diag_len1 - buff_offset;
+ dc->port[PORT_DIAG].dl_addr[CH_B] =
+ (offset += dc->config_table.dl_diag_len1);
+ dc->port[PORT_DIAG].dl_size[CH_B] =
+ dc->config_table.dl_diag_len2 - buff_offset;
+
+ /* App1 port dl configuration */
+ dc->port[PORT_APP1].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_diag_len2);
+ dc->port[PORT_APP1].dl_size[CH_A] =
+ dc->config_table.dl_app1_len - buff_offset;
+
+ /* App2 port dl configuration */
+ dc->port[PORT_APP2].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_app1_len);
+ dc->port[PORT_APP2].dl_size[CH_A] =
+ dc->config_table.dl_app2_len - buff_offset;
+
+ /* Ctrl dl configuration */
+ dc->port[PORT_CTRL].dl_addr[CH_A] =
+ (offset += dc->config_table.dl_app2_len);
+ dc->port[PORT_CTRL].dl_size[CH_A] =
+ dc->config_table.dl_ctrl_len - buff_offset;
+
+ offset = dc->base_addr + dc->config_table.ul_start;
+
+ /* Modem Port ul configuration */
+ dc->port[PORT_MDM].ul_addr[CH_A] = offset;
+ dc->port[PORT_MDM].ul_size[CH_A] =
+ dc->config_table.ul_mdm_len1 - buff_offset;
+ dc->port[PORT_MDM].ul_addr[CH_B] =
+ (offset += dc->config_table.ul_mdm_len1);
+ dc->port[PORT_MDM].ul_size[CH_B] =
+ dc->config_table.ul_mdm_len2 - buff_offset;
+
+ /* Diag port ul configuration */
+ dc->port[PORT_DIAG].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_mdm_len2);
+ dc->port[PORT_DIAG].ul_size[CH_A] =
+ dc->config_table.ul_diag_len - buff_offset;
+
+ /* App1 port ul configuration */
+ dc->port[PORT_APP1].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_diag_len);
+ dc->port[PORT_APP1].ul_size[CH_A] =
+ dc->config_table.ul_app1_len - buff_offset;
+
+ /* App2 port ul configuration */
+ dc->port[PORT_APP2].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_app1_len);
+ dc->port[PORT_APP2].ul_size[CH_A] =
+ dc->config_table.ul_app2_len - buff_offset;
+
+ /* Ctrl ul configuration */
+ dc->port[PORT_CTRL].ul_addr[CH_A] =
+ (offset += dc->config_table.ul_app2_len);
+ dc->port[PORT_CTRL].ul_size[CH_A] =
+ dc->config_table.ul_ctrl_len - buff_offset;
+}
+
+/* Dump config table during the initialization phase */
+#ifdef DEBUG
+static void dump_table(const struct nozomi *dc)
+{
+ DBG3("signature: 0x%08X", dc->config_table.signature);
+ DBG3("version: 0x%04X", dc->config_table.version);
+ DBG3("product_information: 0x%04X", \
+ dc->config_table.product_information);
+ DBG3("toggle enabled: %d", dc->config_table.toggle.enabled);
+ DBG3("toggle up_mdm: %d", dc->config_table.toggle.mdm_ul);
+ DBG3("toggle dl_mdm: %d", dc->config_table.toggle.mdm_dl);
+ DBG3("toggle dl_dbg: %d", dc->config_table.toggle.diag_dl);
+
+ DBG3("dl_start: 0x%04X", dc->config_table.dl_start);
+ DBG3("dl_mdm_len0: 0x%04X, %d", dc->config_table.dl_mdm_len1,
+ dc->config_table.dl_mdm_len1);
+ DBG3("dl_mdm_len1: 0x%04X, %d", dc->config_table.dl_mdm_len2,
+ dc->config_table.dl_mdm_len2);
+ DBG3("dl_diag_len0: 0x%04X, %d", dc->config_table.dl_diag_len1,
+ dc->config_table.dl_diag_len1);
+ DBG3("dl_diag_len1: 0x%04X, %d", dc->config_table.dl_diag_len2,
+ dc->config_table.dl_diag_len2);
+ DBG3("dl_app1_len: 0x%04X, %d", dc->config_table.dl_app1_len,
+ dc->config_table.dl_app1_len);
+ DBG3("dl_app2_len: 0x%04X, %d", dc->config_table.dl_app2_len,
+ dc->config_table.dl_app2_len);
+ DBG3("dl_ctrl_len: 0x%04X, %d", dc->config_table.dl_ctrl_len,
+ dc->config_table.dl_ctrl_len);
+ DBG3("ul_start: 0x%04X, %d", dc->config_table.ul_start,
+ dc->config_table.ul_start);
+ DBG3("ul_mdm_len[0]: 0x%04X, %d", dc->config_table.ul_mdm_len1,
+ dc->config_table.ul_mdm_len1);
+ DBG3("ul_mdm_len[1]: 0x%04X, %d", dc->config_table.ul_mdm_len2,
+ dc->config_table.ul_mdm_len2);
+ DBG3("ul_diag_len: 0x%04X, %d", dc->config_table.ul_diag_len,
+ dc->config_table.ul_diag_len);
+ DBG3("ul_app1_len: 0x%04X, %d", dc->config_table.ul_app1_len,
+ dc->config_table.ul_app1_len);
+ DBG3("ul_app2_len: 0x%04X, %d", dc->config_table.ul_app2_len,
+ dc->config_table.ul_app2_len);
+ DBG3("ul_ctrl_len: 0x%04X, %d", dc->config_table.ul_ctrl_len,
+ dc->config_table.ul_ctrl_len);
+}
+#else
+static __inline__ void dump_table(const struct nozomi *dc) { }
+#endif
+
+/*
+ * Read configuration table from card during the initialization phase
+ * Returns 1 if ok, else 0
+ */
+static int nozomi_read_config_table(struct nozomi *dc)
+{
+ read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
+ sizeof(struct config_table));
+
+ if (dc->config_table.signature != CONFIG_MAGIC) {
+ dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
+ dc->config_table.signature, CONFIG_MAGIC);
+ return 0;
+ }
+
+ if ((dc->config_table.version == 0)
+ || (dc->config_table.toggle.enabled == TOGGLE_VALID)) {
+ int i;
+ DBG1("Second phase, configuring card");
+
+ setup_memory(dc);
+
+ dc->port[PORT_MDM].toggle_ul = dc->config_table.toggle.mdm_ul;
+ dc->port[PORT_MDM].toggle_dl = dc->config_table.toggle.mdm_dl;
+ dc->port[PORT_DIAG].toggle_dl = dc->config_table.toggle.diag_dl;
+ DBG1("toggle ports: MDM UL:%d MDM DL:%d, DIAG DL:%d",
+ dc->port[PORT_MDM].toggle_ul,
+ dc->port[PORT_MDM].toggle_dl, dc->port[PORT_DIAG].toggle_dl);
+
+ dump_table(dc);
+
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ dc->port[i].fifo_ul =
+ kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
+ memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
+ memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
+ }
+
+ /* Enable control channel */
+ dc->last_ier = dc->last_ier | CTRL_DL;
+ writew(dc->last_ier, dc->reg_ier);
+
+ dev_info(&dc->pdev->dev, "Initialization OK!\n");
+ return 1;
+ }
+
+ if ((dc->config_table.version > 0)
+ && (dc->config_table.toggle.enabled != TOGGLE_VALID)) {
+ u32 offset = 0;
+ DBG1("First phase: pushing upload buffers, clearing download");
+
+ dev_info(&dc->pdev->dev, "Version of card: %d\n",
+ dc->config_table.version);
+
+ /* Here we should disable all I/O over F32. */
+ setup_memory(dc);
+
+ /*
+ * We should send ALL channel pair tokens back along
+ * with reset token
+ */
+
+ /* push upload modem buffers */
+ write_mem32(dc->port[PORT_MDM].ul_addr[CH_A],
+ (u32 *) &offset, 4);
+ write_mem32(dc->port[PORT_MDM].ul_addr[CH_B],
+ (u32 *) &offset, 4);
+
+ writew(MDM_UL | DIAG_DL | MDM_DL, dc->reg_fcr);
+
+ DBG1("First phase done");
+ }
+
+ return 1;
+}
+
+/* Enable uplink interrupts */
+static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
+{
+ u16 mask[NOZOMI_MAX_PORTS] = \
+ {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier |= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Disable uplink interrupts */
+static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
+{
+ u16 mask[NOZOMI_MAX_PORTS] = \
+ {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier &= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Enable downlink interrupts */
+static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
+{
+ u16 mask[NOZOMI_MAX_PORTS] = \
+ {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier |= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/* Disable downlink interrupts */
+static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
+{
+ u16 mask[NOZOMI_MAX_PORTS] = \
+ {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
+
+ if (port < NOZOMI_MAX_PORTS) {
+ dc->last_ier &= mask[port];
+ writew(dc->last_ier, dc->reg_ier);
+ } else {
+ dev_err(&dc->pdev->dev, "Called with wrong port?\n");
+ }
+}
+
+/*
+ * Return 1 - send buffer to card and ack.
+ * Return 0 - don't ack, don't send buffer to card.
+ */
+static int send_data(enum port_type index, struct nozomi *dc)
+{
+ u32 size = 0;
+ struct port *port = &dc->port[index];
+ u8 toggle = port->toggle_ul;
+ void __iomem *addr = port->ul_addr[toggle];
+ u32 ul_size = port->ul_size[toggle];
+ struct tty_struct *tty = port->tty;
+
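+	/*
+	 * The uplink buffer selected by the toggle is written as a 32-bit
+	 * length word followed by the payload taken from the port's kfifo.
+	 */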
+ /* Get data from tty and place in buf for now */
+ size = __kfifo_get(port->fifo_ul, dc->send_buf,
+ ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
+
+ if (size == 0) {
+ DBG4("No more data to send, disable link:");
+ return 0;
+ }
+
+ /* DUMP(buf, size); */
+
+ /* Write length + data */
+ write_mem32(addr, (u32 *) &size, 4);
+ write_mem32(addr + 4, (u32 *) dc->send_buf, size);
+
+ if (tty)
+ tty_wakeup(tty);
+
+ return 1;
+}
+
+/* If all data has been read, return 1, else 0 */
+static int receive_data(enum port_type index, struct nozomi *dc)
+{
+ u8 buf[RECEIVE_BUF_MAX] = { 0 };
+ int size;
+ u32 offset = 4;
+ struct port *port = &dc->port[index];
+ void __iomem *addr = port->dl_addr[port->toggle_dl];
+ struct tty_struct *tty = port->tty;
+ int i;
+
+ if (unlikely(!tty)) {
+ DBG1("tty not open for port: %d?", index);
+ return 1;
+ }
+
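+	/*
+	 * The downlink buffer starts with a 32-bit length word followed by
+	 * the payload; copy it to the tty in RECEIVE_BUF_MAX sized chunks.
+	 */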
+ read_mem32((u32 *) &size, addr, 4);
+ /* DBG1( "%d bytes port: %d", size, index); */
+
+ if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ DBG1("No room in tty, don't read data, don't ack interrupt, "
+ "disable interrupt");
+
+ /* disable interrupt in downlink... */
+ disable_transmit_dl(index, dc);
+ return 0;
+ }
+
+ if (unlikely(size == 0)) {
+ dev_err(&dc->pdev->dev, "size == 0?\n");
+ return 1;
+ }
+
+ tty_buffer_request_room(tty, size);
+
+ while (size > 0) {
+ read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
+
+ if (size == 1) {
+ tty_insert_flip_char(tty, buf[0], TTY_NORMAL);
+ size = 0;
+ } else if (size < RECEIVE_BUF_MAX) {
+ size -= tty_insert_flip_string(tty, (char *) buf, size);
+ } else {
+ i = tty_insert_flip_string(tty, \
+ (char *) buf, RECEIVE_BUF_MAX);
+ size -= i;
+ offset += i;
+ }
+ }
+
+ set_bit(index, &dc->flip);
+
+ return 1;
+}
+
+/* Debug for interrupts */
+#ifdef DEBUG
+static char *interrupt2str(u16 interrupt)
+{
+ static char buf[TMP_BUF_MAX];
+ char *p = buf;
+
+ interrupt & MDM_DL1 ? p += snprintf(p, TMP_BUF_MAX, "MDM_DL1 ") : NULL;
+ interrupt & MDM_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_DL2 ") : NULL;
+
+ interrupt & MDM_UL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_UL1 ") : NULL;
+ interrupt & MDM_UL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "MDM_UL2 ") : NULL;
+
+ interrupt & DIAG_DL1 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_DL1 ") : NULL;
+ interrupt & DIAG_DL2 ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_DL2 ") : NULL;
+
+ interrupt & DIAG_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "DIAG_UL ") : NULL;
+
+ interrupt & APP1_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP1_DL ") : NULL;
+ interrupt & APP2_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP2_DL ") : NULL;
+
+ interrupt & APP1_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP1_UL ") : NULL;
+ interrupt & APP2_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "APP2_UL ") : NULL;
+
+ interrupt & CTRL_DL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "CTRL_DL ") : NULL;
+ interrupt & CTRL_UL ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "CTRL_UL ") : NULL;
+
+ interrupt & RESET ? p += snprintf(p, TMP_BUF_MAX - (p - buf),
+ "RESET ") : NULL;
+
+ return buf;
+}
+#endif
+
+/*
+ * Receive flow control
+ * Returns 1 if OK, else 0
+ */
+static int receive_flow_control(struct nozomi *dc)
+{
+ enum port_type port = PORT_MDM;
+ struct ctrl_dl ctrl_dl;
+ struct ctrl_dl old_ctrl;
+ u16 enable_ier = 0;
+
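+	/*
+	 * The control downlink buffer holds a 16-bit ctrl_dl word: the port
+	 * it applies to plus the DCD/CTS/RI/DSR modem status bits.
+	 */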
+ read_mem32((u32 *) &ctrl_dl, dc->port[PORT_CTRL].dl_addr[CH_A], 2);
+
+ switch (ctrl_dl.port) {
+ case CTRL_CMD:
+ DBG1("The Base Band sends this value as a response to a "
+ "request for IMSI detach sent over the control "
+ "channel uplink (see section 7.6.1).");
+ break;
+ case CTRL_MDM:
+ port = PORT_MDM;
+ enable_ier = MDM_DL;
+ break;
+ case CTRL_DIAG:
+ port = PORT_DIAG;
+ enable_ier = DIAG_DL;
+ break;
+ case CTRL_APP1:
+ port = PORT_APP1;
+ enable_ier = APP1_DL;
+ break;
+ case CTRL_APP2:
+ port = PORT_APP2;
+ enable_ier = APP2_DL;
+ break;
+ default:
+ dev_err(&dc->pdev->dev,
+ "ERROR: flow control received for non-existing port\n");
+ return 0;
+ };
+
+ DBG1("0x%04X->0x%04X", *((u16 *)&dc->port[port].ctrl_dl),
+ *((u16 *)&ctrl_dl));
+
+ old_ctrl = dc->port[port].ctrl_dl;
+ dc->port[port].ctrl_dl = ctrl_dl;
+
+ if (old_ctrl.CTS == 1 && ctrl_dl.CTS == 0) {
+ DBG1("Disable interrupt (0x%04X) on port: %d",
+ enable_ier, port);
+ disable_transmit_ul(port, dc);
+
+ } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
+
+ if (__kfifo_len(dc->port[port].fifo_ul)) {
+ DBG1("Enable interrupt (0x%04X) on port: %d",
+ enable_ier, port);
+ DBG1("Data in buffer [%d], enable transmit! ",
+ __kfifo_len(dc->port[port].fifo_ul));
+ enable_transmit_ul(port, dc);
+ } else {
+ DBG1("No data in buffer...");
+ }
+ }
+
+ if (*(u16 *)&old_ctrl == *(u16 *)&ctrl_dl) {
+ DBG1(" No change in mctrl");
+ return 1;
+ }
+ /* Update statistics */
+ if (old_ctrl.CTS != ctrl_dl.CTS)
+ dc->port[port].tty_icount.cts++;
+ if (old_ctrl.DSR != ctrl_dl.DSR)
+ dc->port[port].tty_icount.dsr++;
+ if (old_ctrl.RI != ctrl_dl.RI)
+ dc->port[port].tty_icount.rng++;
+ if (old_ctrl.DCD != ctrl_dl.DCD)
+ dc->port[port].tty_icount.dcd++;
+
+ wake_up_interruptible(&dc->port[port].tty_wait);
+
+ DBG1("port: %d DCD(%d), CTS(%d), RI(%d), DSR(%d)",
+ port,
+ dc->port[port].tty_icount.dcd, dc->port[port].tty_icount.cts,
+ dc->port[port].tty_icount.rng, dc->port[port].tty_icount.dsr);
+
+ return 1;
+}
+
+static enum ctrl_port_type port2ctrl(enum port_type port,
+ const struct nozomi *dc)
+{
+ switch (port) {
+ case PORT_MDM:
+ return CTRL_MDM;
+ case PORT_DIAG:
+ return CTRL_DIAG;
+ case PORT_APP1:
+ return CTRL_APP1;
+ case PORT_APP2:
+ return CTRL_APP2;
+ default:
+ dev_err(&dc->pdev->dev,
+ "ERROR: send flow control " \
+ "received for non-existing port\n");
+ };
+ return CTRL_ERROR;
+}
+
+/*
+ * Send flow control; only one channel can be updated at a time.
+ * Return 0 - all pending flow control has been updated.
+ * Return 1 - more flow control needs updating; ack the current one to enable more.
+ */
+static int send_flow_control(struct nozomi *dc)
+{
+ u32 i, more_flow_control_to_be_updated = 0;
+ u16 *ctrl;
+
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (dc->port[i].update_flow_control) {
+ if (more_flow_control_to_be_updated) {
+ /* We have more flow control to be updated */
+ return 1;
+ }
+ dc->port[i].ctrl_ul.port = port2ctrl(i, dc);
+ ctrl = (u16 *)&dc->port[i].ctrl_ul;
+ write_mem32(dc->port[PORT_CTRL].ul_addr[0], \
+ (u32 *) ctrl, 2);
+ dc->port[i].update_flow_control = 0;
+ more_flow_control_to_be_updated = 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Handle downlink data; the ports handled are modem and diagnostics.
+ * Return 1 - OK
+ * Return 0 - toggle fields are out of sync
+ */
+static int handle_data_dl(struct nozomi *dc, enum port_type port, u8 *toggle,
+ u16 read_iir, u16 mask1, u16 mask2)
+{
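+	/*
+	 * Each downlink channel is double buffered (mask1/mask2). The toggle
+	 * tracks which buffer is expected next; every buffer consumed is
+	 * acked by writing its mask to the FCR register and the toggle is
+	 * flipped, draining the other buffer as well if it is also pending.
+	 */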
+ if (*toggle == 0 && read_iir & mask1) {
+ if (receive_data(port, dc)) {
+ writew(mask1, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+
+ if (read_iir & mask2) {
+ if (receive_data(port, dc)) {
+ writew(mask2, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+ }
+ } else if (*toggle == 1 && read_iir & mask2) {
+ if (receive_data(port, dc)) {
+ writew(mask2, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+
+ if (read_iir & mask1) {
+ if (receive_data(port, dc)) {
+ writew(mask1, dc->reg_fcr);
+ *toggle = !(*toggle);
+ }
+ }
+ } else {
+ dev_err(&dc->pdev->dev, "port out of sync!, toggle:%d\n",
+ *toggle);
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Handle uplink data; this is currently only used for the modem port.
+ * Return 1 - OK
+ * Return 0 - toggle fields are out of sync
+ */
+static int handle_data_ul(struct nozomi *dc, enum port_type port, u16 read_iir)
+{
+ u8 *toggle = &(dc->port[port].toggle_ul);
+
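+	/*
+	 * The modem uplink is double buffered (MDM_UL1/MDM_UL2). The uplink
+	 * interrupt is masked while send_data() fills a buffer; if data was
+	 * queued the buffer is kicked via the FCR register, the interrupt is
+	 * re-enabled and the toggle flipped.
+	 */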
+ if (*toggle == 0 && read_iir & MDM_UL1) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL1, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+
+ if (read_iir & MDM_UL2) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL2, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+ }
+
+ } else if (*toggle == 1 && read_iir & MDM_UL2) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL2, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+
+ if (read_iir & MDM_UL1) {
+ dc->last_ier &= ~MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(port, dc)) {
+ writew(MDM_UL1, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | MDM_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ *toggle = !*toggle;
+ }
+ }
+ } else {
+ writew(read_iir & MDM_UL, dc->reg_fcr);
+ dev_err(&dc->pdev->dev, "port out of sync!\n");
+ return 0;
+ }
+ return 1;
+}
+
+static irqreturn_t interrupt_handler(int irq, void *dev_id)
+{
+ struct nozomi *dc = dev_id;
+ unsigned int a;
+ u16 read_iir;
+
+ if (!dc)
+ return IRQ_NONE;
+
+ spin_lock(&dc->spin_mutex);
+ read_iir = readw(dc->reg_iir);
+
+ /* Card removed */
+ if (read_iir == (u16)-1)
+ goto none;
+ /*
+ * Just handle interrupt enabled in IER
+ * (by masking with dc->last_ier)
+ */
+ read_iir &= dc->last_ier;
+
+ if (read_iir == 0)
+ goto none;
+
+
+ DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
+ dc->last_ier);
+
+ if (read_iir & RESET) {
+ if (unlikely(!nozomi_read_config_table(dc))) {
+ dc->last_ier = 0x0;
+ writew(dc->last_ier, dc->reg_ier);
+ dev_err(&dc->pdev->dev, "Could not read status from "
+ "card, we should disable interface\n");
+ } else {
+ writew(RESET, dc->reg_fcr);
+ }
+ /* No more useful info if this was the reset interrupt. */
+ goto exit_handler;
+ }
+ if (read_iir & CTRL_UL) {
+ DBG1("CTRL_UL");
+ dc->last_ier &= ~CTRL_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_flow_control(dc)) {
+ writew(CTRL_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | CTRL_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & CTRL_DL) {
+ receive_flow_control(dc);
+ writew(CTRL_DL, dc->reg_fcr);
+ }
+ if (read_iir & MDM_DL) {
+ if (!handle_data_dl(dc, PORT_MDM,
+ &(dc->port[PORT_MDM].toggle_dl), read_iir,
+ MDM_DL1, MDM_DL2)) {
+ dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & MDM_UL) {
+ if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
+ dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & DIAG_DL) {
+ if (!handle_data_dl(dc, PORT_DIAG,
+ &(dc->port[PORT_DIAG].toggle_dl), read_iir,
+ DIAG_DL1, DIAG_DL2)) {
+ dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
+ goto exit_handler;
+ }
+ }
+ if (read_iir & DIAG_UL) {
+ dc->last_ier &= ~DIAG_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_DIAG, dc)) {
+ writew(DIAG_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | DIAG_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & APP1_DL) {
+ if (receive_data(PORT_APP1, dc))
+ writew(APP1_DL, dc->reg_fcr);
+ }
+ if (read_iir & APP1_UL) {
+ dc->last_ier &= ~APP1_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_APP1, dc)) {
+ writew(APP1_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | APP1_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+ if (read_iir & APP2_DL) {
+ if (receive_data(PORT_APP2, dc))
+ writew(APP2_DL, dc->reg_fcr);
+ }
+ if (read_iir & APP2_UL) {
+ dc->last_ier &= ~APP2_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ if (send_data(PORT_APP2, dc)) {
+ writew(APP2_UL, dc->reg_fcr);
+ dc->last_ier = dc->last_ier | APP2_UL;
+ writew(dc->last_ier, dc->reg_ier);
+ }
+ }
+
+exit_handler:
+ spin_unlock(&dc->spin_mutex);
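+	/* Push received data to the tty layer outside the spinlock. */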
+ for (a = 0; a < NOZOMI_MAX_PORTS; a++)
+ if (test_and_clear_bit(a, &dc->flip))
+ tty_flip_buffer_push(dc->port[a].tty);
+ return IRQ_HANDLED;
+none:
+ spin_unlock(&dc->spin_mutex);
+ return IRQ_NONE;
+}
+
+static void nozomi_get_card_type(struct nozomi *dc)
+{
+ int i;
+ u32 size = 0;
+
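+	/*
+	 * Determine the card type from the total size of the PCI BARs; the
+	 * value doubles as the size of the MMIO window mapped later.
+	 */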
+ for (i = 0; i < 6; i++)
+ size += pci_resource_len(dc->pdev, i);
+
+ /* Assume card type F32_8 if no match */
+ dc->card_type = size == 2048 ? F32_2 : F32_8;
+
+ dev_info(&dc->pdev->dev, "Card type is: %d\n", dc->card_type);
+}
+
+static void nozomi_setup_private_data(struct nozomi *dc)
+{
+ void __iomem *offset = dc->base_addr + dc->card_type / 2;
+ unsigned int i;
+
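+	/* The FCR/IIR/IER registers sit relative to the middle of the MMIO window. */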
+ dc->reg_fcr = (void __iomem *)(offset + R_FCR);
+ dc->reg_iir = (void __iomem *)(offset + R_IIR);
+ dc->reg_ier = (void __iomem *)(offset + R_IER);
+ dc->last_ier = 0;
+ dc->flip = 0;
+
+ dc->port[PORT_MDM].token_dl = MDM_DL;
+ dc->port[PORT_DIAG].token_dl = DIAG_DL;
+ dc->port[PORT_APP1].token_dl = APP1_DL;
+ dc->port[PORT_APP2].token_dl = APP2_DL;
+
+ for (i = 0; i < MAX_PORT; i++)
+ init_waitqueue_head(&dc->port[i].tty_wait);
+}
+
+static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n", dc->card_type);
+}
+static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
+
+static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%u\n", dc->open_ttys);
+}
+static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
+
+static void make_sysfs_files(struct nozomi *dc)
+{
+ if (device_create_file(&dc->pdev->dev, &dev_attr_card_type))
+ dev_err(&dc->pdev->dev,
+ "Could not create sysfs file for card_type\n");
+ if (device_create_file(&dc->pdev->dev, &dev_attr_open_ttys))
+ dev_err(&dc->pdev->dev,
+ "Could not create sysfs file for open_ttys\n");
+}
+
+static void remove_sysfs_files(struct nozomi *dc)
+{
+ device_remove_file(&dc->pdev->dev, &dev_attr_card_type);
+ device_remove_file(&dc->pdev->dev, &dev_attr_open_ttys);
+}
+
+/* Allocate memory for one device */
+static int __devinit nozomi_card_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ resource_size_t start;
+ int ret;
+ struct nozomi *dc = NULL;
+ int ndev_idx;
+ int i;
+
+ dev_dbg(&pdev->dev, "Init, new card found\n");
+
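+	/* Find a free tty minor range (ndevs slot) for this card. */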
+ for (ndev_idx = 0; ndev_idx < ARRAY_SIZE(ndevs); ndev_idx++)
+ if (!ndevs[ndev_idx])
+ break;
+
+ if (ndev_idx >= ARRAY_SIZE(ndevs)) {
+ dev_err(&pdev->dev, "no free tty range for this card left\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ dc = kzalloc(sizeof(struct nozomi), GFP_KERNEL);
+ if (unlikely(!dc)) {
+ dev_err(&pdev->dev, "Could not allocate memory\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ dc->pdev = pdev;
+
+ /* Find out what card type it is */
+ nozomi_get_card_type(dc);
+
+ ret = pci_enable_device(dc->pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PCI Device\n");
+ goto err_free;
+ }
+
+ start = pci_resource_start(dc->pdev, 0);
+ if (start == 0) {
+ dev_err(&pdev->dev, "No I/O address for card detected\n");
+ ret = -ENODEV;
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
+ (int) /* nozomi_private.io_addr */ 0);
+ goto err_disable_device;
+ }
+
+ dc->base_addr = ioremap(start, dc->card_type);
+ if (!dc->base_addr) {
+ dev_err(&pdev->dev, "Unable to map card MMIO\n");
+ ret = -ENODEV;
+ goto err_rel_regs;
+ }
+
+ dc->send_buf = kmalloc(SEND_BUF_MAX, GFP_KERNEL);
+ if (!dc->send_buf) {
+ dev_err(&pdev->dev, "Could not allocate send buffer?\n");
+ ret = -ENOMEM;
+ goto err_free_sbuf;
+ }
+
+ spin_lock_init(&dc->spin_mutex);
+
+ nozomi_setup_private_data(dc);
+
+ /* Disable all interrupts */
+ dc->last_ier = 0;
+ writew(dc->last_ier, dc->reg_ier);
+
+ ret = request_irq(pdev->irq, &interrupt_handler, IRQF_SHARED,
+ NOZOMI_NAME, dc);
+ if (unlikely(ret)) {
+ dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
+ goto err_free_sbuf;
+ }
+
+ DBG1("base_addr: %p", dc->base_addr);
+
+ make_sysfs_files(dc);
+
+ dc->index_start = ndev_idx * MAX_PORT;
+ ndevs[ndev_idx] = dc;
+
+ for (i = 0; i < MAX_PORT; i++) {
+ mutex_init(&dc->port[i].tty_sem);
+ dc->port[i].tty_open_count = 0;
+ dc->port[i].tty = NULL;
+ tty_register_device(ntty_driver, dc->index_start + i,
+ &pdev->dev);
+ }
+
+ /* Enable RESET interrupt. */
+ dc->last_ier = RESET;
+ writew(dc->last_ier, dc->reg_ier);
+
+ pci_set_drvdata(pdev, dc);
+
+ return 0;
+
+err_free_sbuf:
+ kfree(dc->send_buf);
+ iounmap(dc->base_addr);
+err_rel_regs:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_free:
+ kfree(dc);
+err:
+ return ret;
+}
+
+static void __devexit tty_exit(struct nozomi *dc)
+{
+ unsigned int i;
+
+ DBG1(" ");
+
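+	/*
+	 * Hang up any open ports and wait until all ttys have been closed
+	 * before unregistering the devices.
+	 */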
+ flush_scheduled_work();
+
+ for (i = 0; i < MAX_PORT; ++i)
+ if (dc->port[i].tty && \
+ list_empty(&dc->port[i].tty->hangup_work.entry))
+ tty_hangup(dc->port[i].tty);
+
+ while (dc->open_ttys)
+ msleep(1);
+
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+}
+
+/* Deallocate memory for one device */
+static void __devexit nozomi_card_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct ctrl_ul ctrl;
+ struct nozomi *dc = pci_get_drvdata(pdev);
+
+ /* Disable all interrupts */
+ dc->last_ier = 0;
+ writew(dc->last_ier, dc->reg_ier);
+
+ tty_exit(dc);
+
+	/* Send 0x0001: command the card to resend the reset token. */
+	/* This is needed to get the reset when the module is reloaded. */
+ ctrl.port = 0x00;
+ ctrl.reserved = 0;
+ ctrl.RTS = 0;
+ ctrl.DTR = 1;
+ DBG1("sending flow control 0x%04X", *((u16 *)&ctrl));
+
+	/* Set up dc->reg addresses so we can use the defines here */
+ write_mem32(dc->port[PORT_CTRL].ul_addr[0], (u32 *)&ctrl, 2);
+ writew(CTRL_UL, dc->reg_fcr); /* push the token to the card. */
+
+ remove_sysfs_files(dc);
+
+ free_irq(pdev->irq, dc);
+
+ for (i = 0; i < MAX_PORT; i++)
+ if (dc->port[i].fifo_ul)
+ kfifo_free(dc->port[i].fifo_ul);
+
+ kfree(dc->send_buf);
+
+ iounmap(dc->base_addr);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+
+ ndevs[dc->index_start / MAX_PORT] = NULL;
+
+ kfree(dc);
+}
+
+static void set_rts(const struct tty_struct *tty, int rts)
+{
+ struct port *port = get_port_by_tty(tty);
+
+ port->ctrl_ul.RTS = rts;
+ port->update_flow_control = 1;
+ enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
+}
+
+static void set_dtr(const struct tty_struct *tty, int dtr)
+{
+ struct port *port = get_port_by_tty(tty);
+
+ DBG1("SETTING DTR index: %d, dtr: %d", tty->index, dtr);
+
+ port->ctrl_ul.DTR = dtr;
+ port->update_flow_control = 1;
+ enable_transmit_ul(PORT_CTRL, get_dc_by_tty(tty));
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * TTY code
+ * ----------------------------------------------------------------------------
+ */
+
+/* Called when the userspace process opens the tty, /dev/noz*. */
+static int ntty_open(struct tty_struct *tty, struct file *file)
+{
+ struct port *port = get_port_by_tty(tty);
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ if (!port || !dc)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&port->tty_sem))
+ return -ERESTARTSYS;
+
+ port->tty_open_count++;
+ dc->open_ttys++;
+
+ /* Enable interrupt downlink for channel */
+ if (port->tty_open_count == 1) {
+ tty->low_latency = 1;
+ tty->driver_data = port;
+ port->tty = tty;
+ DBG1("open: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier = dc->last_ier | port->token_dl;
+ writew(dc->last_ier, dc->reg_ier);
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ }
+
+ mutex_unlock(&port->tty_sem);
+
+ return 0;
+}
+
+/* Called when the userspace process closes the tty, /dev/noz*. */
+static void ntty_close(struct tty_struct *tty, struct file *file)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ struct port *port = tty->driver_data;
+ unsigned long flags;
+
+ if (!dc || !port)
+ return;
+
+ if (mutex_lock_interruptible(&port->tty_sem))
+ return;
+
+ if (!port->tty_open_count)
+ goto exit;
+
+ dc->open_ttys--;
+ port->tty_open_count--;
+
+ if (port->tty_open_count == 0) {
+ DBG1("close: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(port->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ }
+
+exit:
+ mutex_unlock(&port->tty_sem);
+}
+
+/*
+ * Called when the userspace process writes to the tty (/dev/noz*).
+ * Data is inserted into a fifo, which is then read and transferred to the modem.
+ */
+static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
+ int count)
+{
+ int rval = -EINVAL;
+ struct nozomi *dc = get_dc_by_tty(tty);
+ struct port *port = tty->driver_data;
+ unsigned long flags;
+
+ /* DBG1( "WRITEx: %d, index = %d", count, index); */
+
+ if (!dc || !port)
+ return -ENODEV;
+
+ if (unlikely(!mutex_trylock(&port->tty_sem))) {
+ /*
+ * must test lock as tty layer wraps calls
+ * to this function with BKL
+ */
+ dev_err(&dc->pdev->dev, "Would have deadlocked - "
+ "return EAGAIN\n");
+ return -EAGAIN;
+ }
+
+ if (unlikely(!port->tty_open_count)) {
+ DBG1(" ");
+ goto exit;
+ }
+
+ rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
+
+ /* notify card */
+ if (unlikely(dc == NULL)) {
+ DBG1("No device context?");
+ goto exit;
+ }
+
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ /* CTS is only valid on the modem channel */
+ if (port == &(dc->port[PORT_MDM])) {
+ if (port->ctrl_dl.CTS) {
+ DBG4("Enable interrupt");
+ enable_transmit_ul(tty->index % MAX_PORT, dc);
+ } else {
+ dev_err(&dc->pdev->dev,
+ "CTS not active on modem port?\n");
+ }
+ } else {
+ enable_transmit_ul(tty->index % MAX_PORT, dc);
+ }
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+
+exit:
+ mutex_unlock(&port->tty_sem);
+ return rval;
+}
+
+/*
+ * Calculate how much room is left in the device.
+ * This method is called by the upper tty layer.
+ * According to the n_tty.c sources it expects a value >= 0 and
+ * does not check for negative values.
+ */
+static int ntty_write_room(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ int room = 0;
+ struct nozomi *dc = get_dc_by_tty(tty);
+
+ if (!dc || !port)
+ return 0;
+ if (!mutex_trylock(&port->tty_sem))
+ return 0;
+
+ if (!port->tty_open_count)
+ goto exit;
+
+ room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
+
+exit:
+ mutex_unlock(&port->tty_sem);
+ return room;
+}
+
+/* Gets io control parameters */
+static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct port *port = tty->driver_data;
+ struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
+ struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
+
+ return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
+ (ctrl_ul->DTR ? TIOCM_DTR : 0) |
+ (ctrl_dl->DCD ? TIOCM_CAR : 0) |
+ (ctrl_dl->RI ? TIOCM_RNG : 0) |
+ (ctrl_dl->DSR ? TIOCM_DSR : 0) |
+ (ctrl_dl->CTS ? TIOCM_CTS : 0);
+}
+
+/* Sets io control parameters */
+static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ if (set & TIOCM_RTS)
+ set_rts(tty, 1);
+ else if (clear & TIOCM_RTS)
+ set_rts(tty, 0);
+
+ if (set & TIOCM_DTR)
+ set_dtr(tty, 1);
+ else if (clear & TIOCM_DTR)
+ set_dtr(tty, 0);
+
+ return 0;
+}
+
+static int ntty_cflags_changed(struct port *port, unsigned long flags,
+ struct async_icount *cprev)
+{
+ struct async_icount cnow = port->tty_icount;
+ int ret;
+
+ ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((flags & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((flags & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((flags & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
+
+static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
+{
+ struct async_icount cnow = port->tty_icount;
+ struct serial_icounter_struct icount;
+
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+
+ return copy_to_user(argp, &icount, sizeof(icount));
+}
+
+static int ntty_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct port *port = tty->driver_data;
+ void __user *argp = (void __user *)arg;
+ int rval = -ENOIOCTLCMD;
+
+ DBG1("******** IOCTL, cmd: %d", cmd);
+
+ switch (cmd) {
+ case TIOCMIWAIT: {
+ struct async_icount cprev = port->tty_icount;
+
+ rval = wait_event_interruptible(port->tty_wait,
+ ntty_cflags_changed(port, arg, &cprev));
+ break;
+ } case TIOCGICOUNT:
+ rval = ntty_ioctl_tiocgicount(port, argp);
+ break;
+ default:
+ DBG1("ERR: 0x%08X, %d", cmd, cmd);
+ break;
+ };
+
+ return rval;
+}
+
+/*
+ * Called by the upper tty layer when tty buffers are ready
+ * to receive data again after a call to throttle.
+ */
+static void ntty_unthrottle(struct tty_struct *tty)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ DBG1("UNTHROTTLE");
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ enable_transmit_dl(tty->index % MAX_PORT, dc);
+ set_rts(tty, 1);
+
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+}
+
+/*
+ * Called by the upper tty layer when the tty buffers are almost full.
+ * The driver should stop sending more data.
+ */
+static void ntty_throttle(struct tty_struct *tty)
+{
+ struct nozomi *dc = get_dc_by_tty(tty);
+ unsigned long flags;
+
+ DBG1("THROTTLE");
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ set_rts(tty, 0);
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+}
+
+/* just to discard single character writes */
+static void ntty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /* FIXME !!! */
+ DBG2("PUT CHAR Function: %c", c);
+}
+
+/* Returns number of chars in buffer, called by tty layer */
+static s32 ntty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ struct nozomi *dc = get_dc_by_tty(tty);
+ s32 rval;
+
+ if (unlikely(!dc || !port)) {
+ rval = -ENODEV;
+ goto exit_in_buffer;
+ }
+
+ if (unlikely(!port->tty_open_count)) {
+ dev_err(&dc->pdev->dev, "No tty open?\n");
+ rval = -ENODEV;
+ goto exit_in_buffer;
+ }
+
+ rval = __kfifo_len(port->fifo_ul);
+
+exit_in_buffer:
+ return rval;
+}
+
+static struct tty_operations tty_ops = {
+ .ioctl = ntty_ioctl,
+ .open = ntty_open,
+ .close = ntty_close,
+ .write = ntty_write,
+ .write_room = ntty_write_room,
+ .unthrottle = ntty_unthrottle,
+ .throttle = ntty_throttle,
+ .chars_in_buffer = ntty_chars_in_buffer,
+ .put_char = ntty_put_char,
+ .tiocmget = ntty_tiocmget,
+ .tiocmset = ntty_tiocmset,
+};
+
+/* Module initialization */
+static struct pci_driver nozomi_driver = {
+ .name = NOZOMI_NAME,
+ .id_table = nozomi_pci_tbl,
+ .probe = nozomi_card_init,
+ .remove = __devexit_p(nozomi_card_exit),
+};
+
+static __init int nozomi_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Initializing %s\n", VERSION_STRING);
+
+ ntty_driver = alloc_tty_driver(NTTY_TTY_MAXMINORS);
+ if (!ntty_driver)
+ return -ENOMEM;
+
+ ntty_driver->owner = THIS_MODULE;
+ ntty_driver->driver_name = NOZOMI_NAME_TTY;
+ ntty_driver->name = "noz";
+ ntty_driver->major = 0;
+ ntty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ ntty_driver->subtype = SERIAL_TYPE_NORMAL;
+ ntty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ ntty_driver->init_termios = tty_std_termios;
+ ntty_driver->init_termios.c_cflag = B115200 | CS8 | CREAD | \
+ HUPCL | CLOCAL;
+ ntty_driver->init_termios.c_ispeed = 115200;
+ ntty_driver->init_termios.c_ospeed = 115200;
+ tty_set_operations(ntty_driver, &tty_ops);
+
+ ret = tty_register_driver(ntty_driver);
+ if (ret) {
+ printk(KERN_ERR "Nozomi: failed to register ntty driver\n");
+ goto free_tty;
+ }
+
+ ret = pci_register_driver(&nozomi_driver);
+ if (ret) {
+ printk(KERN_ERR "Nozomi: can't register pci driver\n");
+ goto unr_tty;
+ }
+
+ return 0;
+unr_tty:
+ tty_unregister_driver(ntty_driver);
+free_tty:
+ put_tty_driver(ntty_driver);
+ return ret;
+}
+
+static __exit void nozomi_exit(void)
+{
+ printk(KERN_INFO "Unloading %s\n", DRIVER_DESC);
+ pci_unregister_driver(&nozomi_driver);
+ tty_unregister_driver(ntty_driver);
+ put_tty_driver(ntty_driver);
+}
+
+module_init(nozomi_init);
+module_exit(nozomi_exit);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 0c66b802736a..78b151c4d20f 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -1,5 +1,5 @@
/*
- * Real Time Clock interface for Linux
+ * Real Time Clock interface for Linux
*
* Copyright (C) 1996 Paul Gortmaker
*
@@ -17,7 +17,7 @@
* has been received. If a RTC interrupt has already happened,
* it will output an unsigned long and then block. The output value
* contains the interrupt status in the low byte and the number of
- * interrupts since the last read in the remaining high bytes. The
+ * interrupts since the last read in the remaining high bytes. The
* /dev/rtc interface can also be used with the select(2) call.
*
* This program is free software; you can redistribute it and/or
@@ -104,12 +104,14 @@ static int rtc_has_irq = 1;
#ifndef CONFIG_HPET_EMULATE_RTC
#define is_hpet_enabled() 0
-#define hpet_set_alarm_time(hrs, min, sec) 0
-#define hpet_set_periodic_freq(arg) 0
-#define hpet_mask_rtc_irq_bit(arg) 0
-#define hpet_set_rtc_irq_bit(arg) 0
-#define hpet_rtc_timer_init() do { } while (0)
-#define hpet_rtc_dropped_irq() 0
+#define hpet_set_alarm_time(hrs, min, sec) 0
+#define hpet_set_periodic_freq(arg) 0
+#define hpet_mask_rtc_irq_bit(arg) 0
+#define hpet_set_rtc_irq_bit(arg) 0
+#define hpet_rtc_timer_init() do { } while (0)
+#define hpet_rtc_dropped_irq() 0
+#define hpet_register_irq_handler(h) 0
+#define hpet_unregister_irq_handler(h) 0
#ifdef RTC_IRQ
static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
@@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file,
static unsigned int rtc_poll(struct file *file, poll_table *wait);
#endif
-static void get_rtc_alm_time (struct rtc_time *alm_tm);
+static void get_rtc_alm_time(struct rtc_time *alm_tm);
#ifdef RTC_IRQ
static void set_rtc_irq_bit_locked(unsigned char bit);
static void mask_rtc_irq_bit_locked(unsigned char bit);
@@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file);
* rtc_status but before mod_timer is called, which would then reenable the
* timer (but you would need to have an awful timing before you'd trip on it)
*/
-static unsigned long rtc_status = 0; /* bitmapped status byte. */
-static unsigned long rtc_freq = 0; /* Current periodic IRQ rate */
-static unsigned long rtc_irq_data = 0; /* our output to the world */
+static unsigned long rtc_status; /* bitmapped status byte. */
+static unsigned long rtc_freq; /* Current periodic IRQ rate */
+static unsigned long rtc_irq_data; /* our output to the world */
static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
#ifdef RTC_IRQ
@@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */
* rtc_task_lock nests inside rtc_lock.
*/
static DEFINE_SPINLOCK(rtc_task_lock);
-static rtc_task_t *rtc_callback = NULL;
+static rtc_task_t *rtc_callback;
#endif
/*
@@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL;
static unsigned long epoch = 1900; /* year corresponding to 0x00 */
-static const unsigned char days_in_mo[] =
+static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
/*
@@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
* the last read in the remainder of rtc_irq_data.
*/
- spin_lock (&rtc_lock);
+ spin_lock(&rtc_lock);
rtc_irq_data += 0x100;
rtc_irq_data &= ~0xff;
if (is_hpet_enabled()) {
@@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id)
if (rtc_status & RTC_TIMER_ON)
mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100);
- spin_unlock (&rtc_lock);
+ spin_unlock(&rtc_lock);
/* Now do the rest of the actions */
spin_lock(&rtc_task_lock);
if (rtc_callback)
rtc_callback->func(rtc_callback->private_data);
spin_unlock(&rtc_task_lock);
- wake_up_interruptible(&rtc_wait);
+ wake_up_interruptible(&rtc_wait);
- kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
return IRQ_HANDLED;
}
@@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
-
+
if (rtc_has_irq == 0)
return -EIO;
@@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
* confusing. And no, xchg() is not the answer. */
__set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irq (&rtc_lock);
+
+ spin_lock_irq(&rtc_lock);
data = rtc_irq_data;
rtc_irq_data = 0;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
if (data != 0)
break;
@@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
schedule();
} while (1);
- if (count == sizeof(unsigned int))
- retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int);
- else
- retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long);
+ if (count == sizeof(unsigned int)) {
+ retval = put_user(data,
+ (unsigned int __user *)buf) ?: sizeof(int);
+ } else {
+ retval = put_user(data,
+ (unsigned long __user *)buf) ?: sizeof(long);
+ }
if (!retval)
retval = count;
out:
@@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf,
static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
{
- struct rtc_time wtime;
+ struct rtc_time wtime;
#ifdef RTC_IRQ
if (rtc_has_irq == 0) {
@@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
}
case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
{
- unsigned long flags; /* can be called from isr via rtc_control() */
- spin_lock_irqsave (&rtc_lock, flags);
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
mask_rtc_irq_bit_locked(RTC_PIE);
if (rtc_status & RTC_TIMER_ON) {
rtc_status &= ~RTC_TIMER_ON;
del_timer(&rtc_irq_timer);
}
- spin_unlock_irqrestore (&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return 0;
}
case RTC_PIE_ON: /* Allow periodic ints */
{
- unsigned long flags; /* can be called from isr via rtc_control() */
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
+
/*
* We don't really want Joe User enabling more
* than 64Hz of interrupts on a multi-user machine.
*/
if (!kernel && (rtc_freq > rtc_max_user_freq) &&
- (!capable(CAP_SYS_RESOURCE)))
+ (!capable(CAP_SYS_RESOURCE)))
return -EACCES;
- spin_lock_irqsave (&rtc_lock, flags);
+ spin_lock_irqsave(&rtc_lock, flags);
if (!(rtc_status & RTC_TIMER_ON)) {
mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq +
2*HZ/100);
rtc_status |= RTC_TIMER_ON;
}
set_rtc_irq_bit_locked(RTC_PIE);
- spin_unlock_irqrestore (&rtc_lock, flags);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
return 0;
}
case RTC_UIE_OFF: /* Mask ints from RTC updates. */
@@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
*/
memset(&wtime, 0, sizeof(struct rtc_time));
get_rtc_alm_time(&wtime);
- break;
+ break;
}
case RTC_ALM_SET: /* Store a time into the alarm */
{
@@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
*/
}
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) ||
- RTC_ALWAYS_BCD)
- {
- if (sec < 60) BIN_TO_BCD(sec);
- else sec = 0xff;
-
- if (min < 60) BIN_TO_BCD(min);
- else min = 0xff;
-
- if (hrs < 24) BIN_TO_BCD(hrs);
- else hrs = 0xff;
+ RTC_ALWAYS_BCD) {
+ if (sec < 60)
+ BIN_TO_BCD(sec);
+ else
+ sec = 0xff;
+
+ if (min < 60)
+ BIN_TO_BCD(min);
+ else
+ min = 0xff;
+
+ if (hrs < 24)
+ BIN_TO_BCD(hrs);
+ else
+ hrs = 0xff;
}
CMOS_WRITE(hrs, RTC_HOURS_ALARM);
CMOS_WRITE(min, RTC_MINUTES_ALARM);
@@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
return -EINVAL;
-
+
if ((hrs >= 24) || (min >= 60) || (sec >= 60))
return -EINVAL;
- if ((yrs -= epoch) > 255) /* They are unsigned */
+ yrs -= epoch;
+ if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irq(&rtc_lock);
@@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
{
int tmp = 0;
unsigned char val;
- unsigned long flags; /* can be called from isr via rtc_control() */
+ /* can be called from isr via rtc_control() */
+ unsigned long flags;
- /*
+ /*
* The max we can do is 8192Hz.
*/
if ((arg < 2) || (arg > 8192))
@@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
* We don't really want Joe User generating more
* than 64Hz of interrupts on a multi-user machine.
*/
- if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
+ if (!kernel && (arg > rtc_max_user_freq) &&
+ !capable(CAP_SYS_RESOURCE))
return -EACCES;
while (arg > (1<<tmp))
@@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
#endif
case RTC_EPOCH_READ: /* Read the epoch. */
{
- return put_user (epoch, (unsigned long __user *)arg);
+ return put_user(epoch, (unsigned long __user *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
- /*
+ /*
* There were no RTC clocks before 1900.
*/
if (arg < 1900)
@@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel)
default:
return -ENOTTY;
}
- return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
+ return copy_to_user((void __user *)arg,
+ &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
@@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
* needed here. Or anywhere else in this driver. */
static int rtc_open(struct inode *inode, struct file *file)
{
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
- if(rtc_status & RTC_IS_OPEN)
+ if (rtc_status & RTC_IS_OPEN)
goto out_busy;
rtc_status |= RTC_IS_OPEN;
rtc_irq_data = 0;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
return 0;
out_busy:
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
return -EBUSY;
}
-static int rtc_fasync (int fd, struct file *filp, int on)
-
+static int rtc_fasync(int fd, struct file *filp, int on)
{
- return fasync_helper (fd, filp, on, &rtc_async_queue);
+ return fasync_helper(fd, filp, on, &rtc_async_queue);
}
static int rtc_release(struct inode *inode, struct file *file)
@@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file)
}
spin_unlock_irq(&rtc_lock);
- if (file->f_flags & FASYNC) {
- rtc_fasync (-1, file, 0);
- }
+ if (file->f_flags & FASYNC)
+ rtc_fasync(-1, file, 0);
no_irq:
#endif
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
rtc_irq_data = 0;
rtc_status &= ~RTC_IS_OPEN;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
+
return 0;
}
@@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
poll_wait(file, &rtc_wait, wait);
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
l = rtc_irq_data;
- spin_unlock_irq (&rtc_lock);
+ spin_unlock_irq(&rtc_lock);
if (l != 0)
return POLLIN | POLLRDNORM;
@@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait)
}
#endif
-/*
- * exported stuffs
- */
-
-EXPORT_SYMBOL(rtc_register);
-EXPORT_SYMBOL(rtc_unregister);
-EXPORT_SYMBOL(rtc_control);
-
int rtc_register(rtc_task_t *task)
{
#ifndef RTC_IRQ
@@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task)
return 0;
#endif
}
+EXPORT_SYMBOL(rtc_register);
int rtc_unregister(rtc_task_t *task)
{
@@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task)
return -ENXIO;
}
rtc_callback = NULL;
-
+
/* disable controls */
if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) {
tmp = CMOS_READ(RTC_CONTROL);
@@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task)
return 0;
#endif
}
+EXPORT_SYMBOL(rtc_unregister);
int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
{
@@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg)
return rtc_do_ioctl(cmd, arg, 1);
#endif
}
-
+EXPORT_SYMBOL(rtc_control);
/*
* The various file operations we support.
@@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = {
#ifdef CONFIG_PROC_FS
static const struct file_operations rtc_proc_fops = {
- .owner = THIS_MODULE,
- .open = rtc_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .owner = THIS_MODULE,
+ .open = rtc_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
#endif
@@ -965,7 +978,7 @@ static int __init rtc_init(void)
#ifdef CONFIG_SPARC32
for_each_ebus(ebus) {
for_each_ebusdev(edev, ebus) {
- if(strcmp(edev->prom_node->name, "rtc") == 0) {
+ if (strcmp(edev->prom_node->name, "rtc") == 0) {
rtc_port = edev->resource[0].start;
rtc_irq = edev->irqs[0];
goto found;
@@ -986,7 +999,8 @@ found:
* XXX Interrupt pin #7 in Espresso is shared between RTC and
* PCI Slot 2 INTA# (and some INTx# in Slot 1).
*/
- if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) {
+ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc",
+ (void *)&rtc_port)) {
rtc_has_irq = 0;
printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq);
return -EIO;
@@ -1015,16 +1029,26 @@ no_irq:
#ifdef RTC_IRQ
if (is_hpet_enabled()) {
+ int err;
+
rtc_int_handler_ptr = hpet_rtc_interrupt;
+ err = hpet_register_irq_handler(rtc_interrupt);
+ if (err != 0) {
+ printk(KERN_WARNING "hpet_register_irq_handler failed "
+ "in rtc_init().");
+ return err;
+ }
} else {
rtc_int_handler_ptr = rtc_interrupt;
}
- if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) {
+ if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED,
+ "rtc", NULL)) {
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
rtc_has_irq = 0;
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
rtc_release_region();
+
return -EIO;
}
hpet_rtc_timer_init();
@@ -1036,6 +1060,7 @@ no_irq:
if (misc_register(&rtc_dev)) {
#ifdef RTC_IRQ
free_irq(RTC_IRQ, NULL);
+ hpet_unregister_irq_handler(rtc_interrupt);
rtc_has_irq = 0;
#endif
rtc_release_region();
@@ -1052,21 +1077,21 @@ no_irq:
#if defined(__alpha__) || defined(__mips__)
rtc_freq = HZ;
-
+
/* Each operating system on an Alpha uses its own epoch.
Let's try to guess which one we are using now. */
-
+
if (rtc_is_updating() != 0)
msleep(20);
-
+
spin_lock_irq(&rtc_lock);
year = CMOS_READ(RTC_YEAR);
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
-
+
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
BCD_TO_BIN(year); /* This should never happen... */
-
+
if (year < 20) {
epoch = 2000;
guess = "SRM (post-2000)";
@@ -1087,7 +1112,8 @@ no_irq:
#endif
}
if (guess)
- printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch);
+ printk(KERN_INFO "rtc: %s epoch (%lu) detected\n",
+ guess, epoch);
#endif
#ifdef RTC_IRQ
if (rtc_has_irq == 0)
@@ -1096,8 +1122,12 @@ no_irq:
spin_lock_irq(&rtc_lock);
rtc_freq = 1024;
if (!hpet_set_periodic_freq(rtc_freq)) {
- /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */
- CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT);
+ /*
+ * Initialize periodic frequency to CMOS reset default,
+ * which is 1024Hz
+ */
+ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06),
+ RTC_FREQ_SELECT);
}
spin_unlock_irq(&rtc_lock);
no_irq2:
@@ -1110,20 +1140,22 @@ no_irq2:
return 0;
}
-static void __exit rtc_exit (void)
+static void __exit rtc_exit(void)
{
cleanup_sysctl();
- remove_proc_entry ("driver/rtc", NULL);
+ remove_proc_entry("driver/rtc", NULL);
misc_deregister(&rtc_dev);
#ifdef CONFIG_SPARC32
if (rtc_has_irq)
- free_irq (rtc_irq, &rtc_port);
+ free_irq(rtc_irq, &rtc_port);
#else
rtc_release_region();
#ifdef RTC_IRQ
- if (rtc_has_irq)
- free_irq (RTC_IRQ, NULL);
+ if (rtc_has_irq) {
+ free_irq(RTC_IRQ, NULL);
+ hpet_unregister_irq_handler(hpet_rtc_interrupt);
+ }
#endif
#endif /* CONFIG_SPARC32 */
}
@@ -1133,14 +1165,14 @@ module_exit(rtc_exit);
#ifdef RTC_IRQ
/*
- * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
+ * At IRQ rates >= 4096Hz, an interrupt may get lost altogether.
* (usually during an IDE disk interrupt, with IRQ unmasking off)
* Since the interrupt handler doesn't get called, the IRQ status
* byte doesn't get read, and the RTC stops generating interrupts.
* A timer is set, and will call this function if/when that happens.
* To get it out of this stalled state, we just read the status.
* At least a jiffy of interrupts (rtc_freq/HZ) will have been lost.
- * (You *really* shouldn't be trying to use a non-realtime system
+ * (You *really* shouldn't be trying to use a non-realtime system
* for something that requires a steady > 1KHz signal anyways.)
*/
@@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data)
{
unsigned long freq;
- spin_lock_irq (&rtc_lock);
+ spin_lock_irq(&rtc_lock);
if (hpet_rtc_dropped_irq()) {
spin_unlock_irq(&rtc_lock);
@@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data)
spin_unlock_irq(&rtc_lock);
- if (printk_ratelimit())
- printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq);
+ if (printk_ratelimit()) {
+ printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
+ freq);
+ }
/* Now we have new data */
wake_up_interruptible(&rtc_wait);
- kill_fasync (&rtc_async_queue, SIGIO, POLL_IN);
+ kill_fasync(&rtc_async_queue, SIGIO, POLL_IN);
}
#endif
@@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* can take just over 2ms. We wait 20ms. There is no need to
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
- * periodic update complete interrupts, (via ioctl) and then
+ * periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
@@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BCD_TO_BIN(rtc_tm->tm_sec);
BCD_TO_BIN(rtc_tm->tm_min);
BCD_TO_BIN(rtc_tm->tm_hour);
@@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
*/
- if ((rtc_tm->tm_year += (epoch - 1900)) <= 69)
+ rtc_tm->tm_year += epoch - 1900;
+ if (rtc_tm->tm_year <= 69)
rtc_tm->tm_year += 100;
rtc_tm->tm_mon--;
@@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm)
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- {
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BCD_TO_BIN(alm_tm->tm_sec);
BCD_TO_BIN(alm_tm->tm_min);
BCD_TO_BIN(alm_tm->tm_hour);