author     Dan Williams <dan.j.williams@intel.com>  2009-04-08 14:28:13 -0700
committer  Dan Williams <dan.j.williams@intel.com>  2009-04-08 14:28:13 -0700
commit     fd74ea65883c7e6903e9b652795f72b723a2be69 (patch)
tree       0792ad598080eae201d2836ac3c5a8fc46d0d03e /drivers
parent     c8f517c444e4f9f55b5b5ca202b8404691a35805 (diff)
parent     8c6db1bbf80123839ec87bdd6cb364aea384623d (diff)
Merge branch 'dmaengine' into async-tx-raid6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/node.c                     |    2
-rw-r--r--  drivers/char/agp/amd64-agp.c            |   13
-rw-r--r--  drivers/char/agp/intel-agp.c            |    8
-rw-r--r--  drivers/char/hvcs.c                     |    9
-rw-r--r--  drivers/char/hvsi.c                     |    1
-rw-r--r--  drivers/dma/Kconfig                     |   11
-rw-r--r--  drivers/dma/dmaengine.c                 |   60
-rw-r--r--  drivers/dma/dmatest.c                   |  307
-rw-r--r--  drivers/dma/dw_dmac.c                   |  333
-rw-r--r--  drivers/dma/dw_dmac_regs.h              |    7
-rw-r--r--  drivers/dma/fsldma.c                    |    1
-rw-r--r--  drivers/dma/ioat_dma.c                  |    1
-rw-r--r--  drivers/dma/iop-adma.c                  |    1
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c             |  371
-rw-r--r--  drivers/dma/ipu/ipu_irq.c               |    2
-rw-r--r--  drivers/dma/mv_xor.c                    |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |    7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         |  115
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c     |   11
-rw-r--r--  drivers/hwmon/Kconfig                   |    4
-rw-r--r--  drivers/hwmon/abituguru3.c              |    7
-rw-r--r--  drivers/hwmon/f75375s.c                 |    2
-rw-r--r--  drivers/hwmon/it87.c                    |    8
-rw-r--r--  drivers/hwmon/lm85.c                    |    8
-rw-r--r--  drivers/hwmon/lm90.c                    |    8
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c      |   39
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c   |    2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h   |    1
-rw-r--r--  drivers/md/md.c                         |   30
-rw-r--r--  drivers/mfd/wm8350-core.c               |    5
-rw-r--r--  drivers/mmc/host/s3cmci.c               |    3
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c     |    3
-rw-r--r--  drivers/mtd/maps/physmap.c              |   19
-rw-r--r--  drivers/net/sunhme.c                    |    2
-rw-r--r--  drivers/pci/hotplug/Kconfig             |    2
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c      |    3
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c          |    1
-rw-r--r--  drivers/pci/quirks.c                    |   31
-rw-r--r--  drivers/platform/x86/acer-wmi.c         |    2
-rw-r--r--  drivers/power/ds2760_battery.c          |   11
-rw-r--r--  drivers/sbus/char/bbc_i2c.c             |    2
-rw-r--r--  drivers/sbus/char/jsflash.c             |    3
-rw-r--r--  drivers/video/aty/aty128fb.c            |   10
-rw-r--r--  drivers/video/aty/radeon_pm.c           |   10
-rw-r--r--  drivers/video/i810/i810_main.c          |    5
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c        |    6
-rw-r--r--  drivers/w1/masters/w1-gpio.c            |    2

49 files changed, 1143 insertions(+), 350 deletions(-)
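
Most of the churn is in drivers/dma: dmaengine grows DMA_PRIVATE reference
counting (privatecnt) and centralized tx_list initialization, dmatest learns
to exercise xor operations, dw_dmac gains a cyclic (ring-buffer) transfer
API, and the IPU IDMAC driver is substantially reworked. The remaining files
are unrelated fixes carried in by the merge.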
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 43fa90b837ee..f8f578a71b25 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -303,7 +303,7 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
- unsigned int nid;
+ int nid;
nid = get_nid_for_pfn(pfn);
if (nid < 0)
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 52f4361eb6e4..d765afda9c2a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -271,15 +271,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
nb_order = (nb_order >> 1) & 7;
pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
nb_aper = nb_base << 25;
- if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
- return 0;
- }
/* Northbridge seems to contain crap. Try the AGP bridge. */
pci_read_config_word(agp, cap+0x14, &apsize);
- if (apsize == 0xffff)
+ if (apsize == 0xffff) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
return -1;
+ }
apsize &= 0xfff;
/* Some BIOS use weird encodings not in the AGPv3 table. */
@@ -301,6 +301,11 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
order = nb_order;
}
+ if (nb_order >= order) {
+ if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
+ return 0;
+ }
+
dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
aper, 32 << order);
if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c7714185f831..4373adb2119a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -633,13 +633,15 @@ static void intel_i830_init_gtt_entries(void)
break;
}
}
- if (gtt_entries > 0)
+ if (gtt_entries > 0) {
dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
gtt_entries / KB(1), local ? "local" : "stolen");
- else
+ gtt_entries /= KB(4);
+ } else {
dev_info(&agp_bridge->dev->dev,
"no pre-allocated video memory detected\n");
- gtt_entries /= KB(4);
+ gtt_entries = 0;
+ }
intel_private.gtt_entries = gtt_entries;
}
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 6e6eb445d374..c76bccf5354d 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -1139,15 +1139,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
hvcsd->tty = tty;
tty->driver_data = hvcsd;
- /*
- * Set this driver to low latency so that we actually have a chance at
- * catching a throttled TTY after we flip_buffer_push. Otherwise the
- * flush_to_async may not execute until after the kernel_thread has
- * yielded and resumed the next flip_buffer_push resulting in data
- * loss.
- */
- tty->low_latency = 1;
-
memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN);
/*
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 406f8742a260..2989056a9e39 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -810,7 +810,6 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
hp = &hvsi_ports[line];
tty->driver_data = hp;
- tty->low_latency = 1; /* avoid throttle/tty_flip_buffer_push race */
mb();
if (hp->state == HVSI_FSP_DIED)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 48ea59e79672..3b3c01b6f1ee 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -98,6 +98,17 @@ config NET_DMA
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
say N.
+config ASYNC_TX_DMA
+ bool "Async_tx: Offload support for the async_tx api"
+ depends on DMA_ENGINE
+ help
+ This allows the async_tx api to take advantage of offload engines for
+ memcpy, memset, xor, and raid6 p+q operations. If your platform has
+ a dma engine that can perform raid operations and you have enabled
+ MD_RAID456 say Y.
+
+ If unsure, say N.
+
config DMATEST
tristate "DMA Test client"
depends on DMA_ENGINE
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 280a9d263eb3..92438e9dacc3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
* published in the general-purpose allocator
*/
dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan), err);
else
break;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
chan->private = NULL;
chan = NULL;
}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
WARN_ONCE(chan->client_count != 1,
"chan reference count %d != 1\n", chan->client_count);
dma_chan_put(chan);
+ /* drop PRIVATE cap enabled by __dma_request_channel() */
+ if (--chan->device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
chan->private = NULL;
mutex_unlock(&dma_list_mutex);
}
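
The privatecnt counter introduced above keeps DMA_PRIVATE set on a public
device for as long as at least one of its channels is privately held, and
clears the capability again when the last holder calls dma_release_channel().
A minimal client sketch of the pairing being counted (my_filter_fn and
my_filter_param are hypothetical):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* sets DMA_PRIVATE and bumps privatecnt on the granting device */
	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
	if (chan) {
		/* ... exclusive use of the channel ... */
		/* drops privatecnt; DMA_PRIVATE is cleared at zero */
		dma_release_channel(chan);
	}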
@@ -602,6 +608,24 @@ void dmaengine_put(void)
}
EXPORT_SYMBOL(dmaengine_put);
+static int get_dma_id(struct dma_device *device)
+{
+ int rc;
+
+ idr_retry:
+ if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+ return -ENOMEM;
+ mutex_lock(&dma_list_mutex);
+ rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+ mutex_unlock(&dma_list_mutex);
+ if (rc == -EAGAIN)
+ goto idr_retry;
+ else if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
@@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref)
return -ENOMEM;
- atomic_set(idr_ref, 0);
- idr_retry:
- if (!idr_pre_get(&dma_idr, GFP_KERNEL))
- return -ENOMEM;
- mutex_lock(&dma_list_mutex);
- rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
- mutex_unlock(&dma_list_mutex);
- if (rc == -EAGAIN)
- goto idr_retry;
- else if (rc != 0)
+ rc = get_dma_id(device);
+ if (rc != 0) {
+ kfree(idr_ref);
return rc;
+ }
+
+ atomic_set(idr_ref, 0);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
+ rc = -ENOMEM;
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
- continue;
+ goto err_out;
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
if (chan->dev == NULL) {
free_percpu(chan->local);
- continue;
+ chan->local = NULL;
+ goto err_out;
}
chan->chan_id = chancnt++;
@@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
if (rc) {
free_percpu(chan->local);
chan->local = NULL;
+ kfree(chan->dev);
+ atomic_dec(idr_ref);
goto err_out;
}
chan->client_count = 0;
@@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
}
}
list_add_tail_rcu(&device->global_node, &dma_device_list);
+ if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+ device->privatecnt++; /* Always private */
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
return 0;
err_out:
+ /* if we never registered a channel just release the idr */
+ if (atomic_read(idr_ref) == 0) {
+ mutex_lock(&dma_list_mutex);
+ idr_remove(&dma_idr, device->dev_id);
+ mutex_unlock(&dma_list_mutex);
+ kfree(idr_ref);
+ return rc;
+ }
+
list_for_each_entry(chan, &device->channels, device_node) {
if (chan->local == NULL)
continue;
@@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
{
tx->chan = chan;
spin_lock_init(&tx->lock);
+ INIT_LIST_HEAD(&tx->tx_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
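
Note that dma_async_tx_descriptor_init() now initializes tx_list itself,
which is why the per-driver INIT_LIST_HEAD(&...->tx_list) calls disappear
from dw_dmac, fsldma, ioat_dma, iop-adma and mv_xor later in this diff. A
sketch of the simplified driver-side descriptor setup (the foo_* names are
hypothetical):

	static struct foo_desc *foo_alloc_desc(struct foo_chan *fc)
	{
		struct foo_desc *d = kzalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return NULL;
		/* tx_list is initialized here now, not by the driver */
		dma_async_tx_descriptor_init(&d->txd, &fc->chan);
		d->txd.tx_submit = foo_tx_submit;
		return d;
	}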
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e190d8b30700..a27c0fb1bc11 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -38,6 +38,11 @@ module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
+static unsigned int xor_sources = 3;
+module_param(xor_sources, uint, S_IRUGO);
+MODULE_PARM_DESC(xor_sources,
+ "Number of xor source buffers (default: 3)");
+
/*
* Initialization patterns. All bytes in the source buffer has bit 7
* set, all bytes in the destination buffer has bit 7 cleared.
@@ -59,8 +64,9 @@ struct dmatest_thread {
struct list_head node;
struct task_struct *task;
struct dma_chan *chan;
- u8 *srcbuf;
- u8 *dstbuf;
+ u8 **srcs;
+ u8 **dsts;
+ enum dma_transaction_type type;
};
struct dmatest_chan {
@@ -98,30 +104,37 @@ static unsigned long dmatest_random(void)
return buf;
}
-static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len)
+static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len)
{
unsigned int i;
-
- for (i = 0; i < start; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
- for ( ; i < start + len; i++)
- buf[i] = PATTERN_SRC | PATTERN_COPY
- | (~i & PATTERN_COUNT_MASK);;
- for ( ; i < test_buf_size; i++)
- buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_SRC | PATTERN_COPY
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
+ buf++;
+ }
}
-static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len)
+static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len)
{
unsigned int i;
-
- for (i = 0; i < start; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
- for ( ; i < start + len; i++)
- buf[i] = PATTERN_DST | PATTERN_OVERWRITE
- | (~i & PATTERN_COUNT_MASK);
- for ( ; i < test_buf_size; i++)
- buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ u8 *buf;
+
+ for (; (buf = *bufs); bufs++) {
+ for (i = 0; i < start; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < start + len; i++)
+ buf[i] = PATTERN_DST | PATTERN_OVERWRITE
+ | (~i & PATTERN_COUNT_MASK);
+ for ( ; i < test_buf_size; i++)
+ buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
+ }
}
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
@@ -150,23 +163,30 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
thread_name, index, expected, actual);
}
-static unsigned int dmatest_verify(u8 *buf, unsigned int start,
+static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
unsigned int end, unsigned int counter, u8 pattern,
bool is_srcbuf)
{
unsigned int i;
unsigned int error_count = 0;
u8 actual;
-
- for (i = start; i < end; i++) {
- actual = buf[i];
- if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) {
- if (error_count < 32)
- dmatest_mismatch(actual, pattern, i, counter,
- is_srcbuf);
- error_count++;
+ u8 expected;
+ u8 *buf;
+ unsigned int counter_orig = counter;
+
+ for (; (buf = *bufs); bufs++) {
+ counter = counter_orig;
+ for (i = start; i < end; i++) {
+ actual = buf[i];
+ expected = pattern | (~counter & PATTERN_COUNT_MASK);
+ if (actual != expected) {
+ if (error_count < 32)
+ dmatest_mismatch(actual, pattern, i,
+ counter, is_srcbuf);
+ error_count++;
+ }
+ counter++;
}
- counter++;
}
if (error_count > 32)
@@ -176,12 +196,17 @@ static unsigned int dmatest_verify(u8 *buf, unsigned int start,
return error_count;
}
+static void dmatest_callback(void *completion)
+{
+ complete(completion);
+}
+
/*
* This function repeatedly tests DMA transfers of various lengths and
- * offsets until it is told to exit by kthread_stop(). There may be
- * multiple threads running this function in parallel for a single
- * channel, and there may be multiple channels being tested in
- * parallel.
+ * offsets for a given operation type until it is told to exit by
+ * kthread_stop(). There may be multiple threads running this function
+ * in parallel for a single channel, and there may be multiple channels
+ * being tested in parallel.
*
* Before each test, the source and destination buffer is initialized
* with a known pattern. This pattern is different depending on
@@ -201,25 +226,57 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
+ enum dma_ctrl_flags flags;
int ret;
+ int src_cnt;
+ int dst_cnt;
+ int i;
thread_name = current->comm;
ret = -ENOMEM;
- thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL);
- if (!thread->srcbuf)
- goto err_srcbuf;
- thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL);
- if (!thread->dstbuf)
- goto err_dstbuf;
smp_rmb();
chan = thread->chan;
+ if (thread->type == DMA_MEMCPY)
+ src_cnt = dst_cnt = 1;
+ else if (thread->type == DMA_XOR) {
+ src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+ dst_cnt = 1;
+ } else
+ goto err_srcs;
+
+ thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->srcs)
+ goto err_srcs;
+ for (i = 0; i < src_cnt; i++) {
+ thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->srcs[i])
+ goto err_srcbuf;
+ }
+ thread->srcs[i] = NULL;
+
+ thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+ if (!thread->dsts)
+ goto err_dsts;
+ for (i = 0; i < dst_cnt; i++) {
+ thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL);
+ if (!thread->dsts[i])
+ goto err_dstbuf;
+ }
+ thread->dsts[i] = NULL;
+
+ set_user_nice(current, 10);
+
+ flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
while (!kthread_should_stop()) {
struct dma_device *dev = chan->device;
- struct dma_async_tx_descriptor *tx;
- dma_addr_t dma_src, dma_dest;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_addr_t dma_srcs[src_cnt];
+ dma_addr_t dma_dsts[dst_cnt];
+ struct completion cmp;
+ unsigned long tmo = msecs_to_jiffies(3000);
total_tests++;
@@ -227,22 +284,41 @@ static int dmatest_func(void *data)
src_off = dmatest_random() % (test_buf_size - len + 1);
dst_off = dmatest_random() % (test_buf_size - len + 1);
- dmatest_init_srcbuf(thread->srcbuf, src_off, len);
- dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
+ dmatest_init_srcs(thread->srcs, src_off, len);
+ dmatest_init_dsts(thread->dsts, dst_off, len);
- dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
- len, DMA_TO_DEVICE);
+ for (i = 0; i < src_cnt; i++) {
+ u8 *buf = thread->srcs[i] + src_off;
+
+ dma_srcs[i] = dma_map_single(dev->dev, buf, len,
+ DMA_TO_DEVICE);
+ }
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
- dma_dest = dma_map_single(dev->dev, thread->dstbuf,
- test_buf_size, DMA_BIDIRECTIONAL);
+ for (i = 0; i < dst_cnt; i++) {
+ dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
+ test_buf_size,
+ DMA_BIDIRECTIONAL);
+ }
+
+ if (thread->type == DMA_MEMCPY)
+ tx = dev->device_prep_dma_memcpy(chan,
+ dma_dsts[0] + dst_off,
+ dma_srcs[0], len,
+ flags);
+ else if (thread->type == DMA_XOR)
+ tx = dev->device_prep_dma_xor(chan,
+ dma_dsts[0] + dst_off,
+ dma_srcs, xor_sources,
+ len, flags);
- tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
- dma_src, len,
- DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
if (!tx) {
- dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
- dma_unmap_single(dev->dev, dma_dest,
- test_buf_size, DMA_BIDIRECTIONAL);
+ for (i = 0; i < src_cnt; i++)
+ dma_unmap_single(dev->dev, dma_srcs[i], len,
+ DMA_TO_DEVICE);
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(dev->dev, dma_dsts[i],
+ test_buf_size,
+ DMA_BIDIRECTIONAL);
pr_warning("%s: #%u: prep error with src_off=0x%x "
"dst_off=0x%x len=0x%x\n",
thread_name, total_tests - 1,
@@ -251,7 +327,10 @@ static int dmatest_func(void *data)
failed_tests++;
continue;
}
- tx->callback = NULL;
+
+ init_completion(&cmp);
+ tx->callback = dmatest_callback;
+ tx->callback_param = &cmp;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -263,44 +342,50 @@ static int dmatest_func(void *data)
failed_tests++;
continue;
}
- dma_async_memcpy_issue_pending(chan);
+ dma_async_issue_pending(chan);
- do {
- msleep(1);
- status = dma_async_memcpy_complete(
- chan, cookie, NULL, NULL);
- } while (status == DMA_IN_PROGRESS);
+ tmo = wait_for_completion_timeout(&cmp, tmo);
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (status == DMA_ERROR) {
- pr_warning("%s: #%u: error during copy\n",
- thread_name, total_tests - 1);
+ if (tmo == 0) {
+ pr_warning("%s: #%u: test timed out\n",
+ thread_name, total_tests - 1);
+ failed_tests++;
+ continue;
+ } else if (status != DMA_SUCCESS) {
+ pr_warning("%s: #%u: got completion callback,"
+ " but status is \'%s\'\n",
+ thread_name, total_tests - 1,
+ status == DMA_ERROR ? "error" : "in progress");
failed_tests++;
continue;
}
+
/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
- dma_unmap_single(dev->dev, dma_dest,
- test_buf_size, DMA_BIDIRECTIONAL);
+ for (i = 0; i < dst_cnt; i++)
+ dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size,
+ DMA_BIDIRECTIONAL);
error_count = 0;
pr_debug("%s: verifying source buffer...\n", thread_name);
- error_count += dmatest_verify(thread->srcbuf, 0, src_off,
+ error_count += dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
- error_count += dmatest_verify(thread->srcbuf, src_off,
+ error_count += dmatest_verify(thread->srcs, src_off,
src_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, true);
- error_count += dmatest_verify(thread->srcbuf, src_off + len,
+ error_count += dmatest_verify(thread->srcs, src_off + len,
test_buf_size, src_off + len,
PATTERN_SRC, true);
pr_debug("%s: verifying dest buffer...\n",
thread->task->comm);
- error_count += dmatest_verify(thread->dstbuf, 0, dst_off,
+ error_count += dmatest_verify(thread->dsts, 0, dst_off,
0, PATTERN_DST, false);
- error_count += dmatest_verify(thread->dstbuf, dst_off,
+ error_count += dmatest_verify(thread->dsts, dst_off,
dst_off + len, src_off,
PATTERN_SRC | PATTERN_COPY, false);
- error_count += dmatest_verify(thread->dstbuf, dst_off + len,
+ error_count += dmatest_verify(thread->dsts, dst_off + len,
test_buf_size, dst_off + len,
PATTERN_DST, false);
@@ -319,10 +404,16 @@ static int dmatest_func(void *data)
}
ret = 0;
- kfree(thread->dstbuf);
+ for (i = 0; thread->dsts[i]; i++)
+ kfree(thread->dsts[i]);
err_dstbuf:
- kfree(thread->srcbuf);
+ kfree(thread->dsts);
+err_dsts:
+ for (i = 0; thread->srcs[i]; i++)
+ kfree(thread->srcs[i]);
err_srcbuf:
+ kfree(thread->srcs);
+err_srcs:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
return ret;
@@ -344,35 +435,36 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
kfree(dtc);
}
-static int dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type)
{
- struct dmatest_chan *dtc;
- struct dmatest_thread *thread;
- unsigned int i;
-
- dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
- if (!dtc) {
- pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
- return -ENOMEM;
- }
+ struct dmatest_thread *thread;
+ struct dma_chan *chan = dtc->chan;
+ char *op;
+ unsigned int i;
- dtc->chan = chan;
- INIT_LIST_HEAD(&dtc->threads);
+ if (type == DMA_MEMCPY)
+ op = "copy";
+ else if (type == DMA_XOR)
+ op = "xor";
+ else
+ return -EINVAL;
for (i = 0; i < threads_per_chan; i++) {
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
- pr_warning("dmatest: No memory for %s-test%u\n",
- dma_chan_name(chan), i);
+ pr_warning("dmatest: No memory for %s-%s%u\n",
+ dma_chan_name(chan), op, i);
+
break;
}
thread->chan = dtc->chan;
+ thread->type = type;
smp_wmb();
- thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
- dma_chan_name(chan), i);
+ thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
+ dma_chan_name(chan), op, i);
if (IS_ERR(thread->task)) {
- pr_warning("dmatest: Failed to run thread %s-test%u\n",
- dma_chan_name(chan), i);
+ pr_warning("dmatest: Failed to run thread %s-%s%u\n",
+ dma_chan_name(chan), op, i);
kfree(thread);
break;
}
@@ -382,7 +474,36 @@ static int dmatest_add_channel(struct dma_chan *chan)
list_add_tail(&thread->node, &dtc->threads);
}
- pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
+ return i;
+}
+
+static int dmatest_add_channel(struct dma_chan *chan)
+{
+ struct dmatest_chan *dtc;
+ struct dma_device *dma_dev = chan->device;
+ unsigned int thread_count = 0;
+ unsigned int cnt;
+
+ dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
+ if (!dtc) {
+ pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+ return -ENOMEM;
+ }
+
+ dtc->chan = chan;
+ INIT_LIST_HEAD(&dtc->threads);
+
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
+ cnt = dmatest_add_threads(dtc, DMA_MEMCPY);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ cnt = dmatest_add_threads(dtc, DMA_XOR);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+
+ pr_info("dmatest: Started %u threads using %s\n",
+ thread_count, dma_chan_name(chan));
list_add_tail(&dtc->node, &dmatest_channels);
nr_channels++;
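
With these changes dmatest starts one set of threads per operation the
channel advertises, named "<chan>-copy<n>" and "<chan>-xor<n>" after the
kthread_run() format string above, and each transfer now completes via a
callback plus wait_for_completion_timeout() instead of a msleep() polling
loop. The xor source count is tunable at load time, e.g.:

	modprobe dmatest xor_sources=5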
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a97c07eef7ec..0b8aada08aa8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc);
}
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ u32 status_block, u32 status_err, u32 status_xfer)
+{
+ if (status_block & dwc->mask) {
+ void (*callback)(void *param);
+ void *callback_param;
+
+ dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ channel_readl(dwc, LLP));
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+ callback = dwc->cdesc->period_callback;
+ callback_param = dwc->cdesc->period_callback_param;
+ if (callback) {
+ spin_unlock(&dwc->lock);
+ callback(callback_param);
+ spin_lock(&dwc->lock);
+ }
+ }
+
+ /*
+ * Error and transfer complete are highly unlikely, and will most
+ * likely be due to a configuration error by the user.
+ */
+ if (unlikely(status_err & dwc->mask) ||
+ unlikely(status_xfer & dwc->mask)) {
+ int i;
+
+ dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+ "interrupt, stopping DMA transfer\n",
+ status_xfer ? "xfer" : "error");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ /* make sure DMA does not restart by loading a new list */
+ channel_writel(dwc, LLP, 0);
+ channel_writel(dwc, CTL_LO, 0);
+ channel_writel(dwc, CTL_HI, 0);
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ for (i = 0; i < dwc->cdesc->periods; i++)
+ dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+ }
+}
+
+/* ------------------------------------------------------------------------- */
+
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
spin_lock(&dwc->lock);
- if (status_err & (1 << i))
+ if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+ dwc_handle_cyclic(dw, dwc, status_block, status_err,
+ status_xfer);
+ else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if ((status_block | status_xfer) & (1 << i))
dwc_scan_descriptors(dw, dwc);
@@ -826,7 +905,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = dwc_tx_submit;
desc->txd.flags = DMA_CTRL_ACK;
- INIT_LIST_HEAD(&desc->txd.tx_list);
desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
sizeof(desc->lli), DMA_TO_DEVICE);
dwc_desc_put(dwc, desc);
@@ -884,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+ dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+ return -ENODEV;
+ }
+
+ spin_lock(&dwc->lock);
+
+ /* assert channel is idle */
+ if (dma_readl(dw, CH_EN) & dwc->mask) {
+ dev_err(chan2dev(&dwc->chan),
+ "BUG: Attempted to start non-idle channel\n");
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+ spin_unlock(&dwc->lock);
+ return -EBUSY;
+ }
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ /* setup DMAC channel registers */
+ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, CTL_HI, 0);
+
+ channel_set_bit(dw, CH_EN, dwc->mask);
+
+ spin_unlock(&dwc->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+ spin_lock(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_cyclic_desc *cdesc;
+ struct dw_cyclic_desc *retval = NULL;
+ struct dw_desc *desc;
+ struct dw_desc *last = NULL;
+ struct dw_dma_slave *dws = chan->private;
+ unsigned long was_cyclic;
+ unsigned int reg_width;
+ unsigned int periods;
+ unsigned int i;
+
+ spin_lock_bh(&dwc->lock);
+ if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+ spin_unlock_bh(&dwc->lock);
+ dev_dbg(chan2dev(&dwc->chan),
+ "queue and/or active list are not empty\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ spin_unlock_bh(&dwc->lock);
+ if (was_cyclic) {
+ dev_dbg(chan2dev(&dwc->chan),
+ "channel already prepared for cyclic DMA\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ retval = ERR_PTR(-EINVAL);
+ reg_width = dws->reg_width;
+ periods = buf_len / period_len;
+
+ /* Check for too big/unaligned periods and unaligned DMA buffer. */
+ if (period_len > (DWC_MAX_COUNT << reg_width))
+ goto out_err;
+ if (unlikely(period_len & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+ goto out_err;
+ if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ goto out_err;
+
+ retval = ERR_PTR(-ENOMEM);
+
+ if (periods > NR_DESCS_PER_CHANNEL)
+ goto out_err;
+
+ cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+ if (!cdesc)
+ goto out_err;
+
+ cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+ if (!cdesc->desc)
+ goto out_err_alloc;
+
+ for (i = 0; i < periods; i++) {
+ desc = dwc_desc_get(dwc);
+ if (!desc)
+ goto out_err_desc_get;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->lli.dar = dws->tx_reg;
+ desc->lli.sar = buf_addr + (period_len * i);
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_FC_M2P
+ | DWC_CTLL_INT_EN);
+ break;
+ case DMA_FROM_DEVICE:
+ desc->lli.dar = buf_addr + (period_len * i);
+ desc->lli.sar = dws->rx_reg;
+ desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_FC_P2M
+ | DWC_CTLL_INT_EN);
+ break;
+ default:
+ break;
+ }
+
+ desc->lli.ctlhi = (period_len >> reg_width);
+ cdesc->desc[i] = desc;
+
+ if (last) {
+ last->lli.llp = desc->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan),
+ last->txd.phys, sizeof(last->lli),
+ DMA_TO_DEVICE);
+ }
+
+ last = desc;
+ }
+
+ /* lets make a cyclic list */
+ last->lli.llp = cdesc->desc[0]->txd.phys;
+ dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+ sizeof(last->lli), DMA_TO_DEVICE);
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+ "period %zu periods %d\n", buf_addr, buf_len,
+ period_len, periods);
+
+ cdesc->periods = periods;
+ dwc->cdesc = cdesc;
+
+ return cdesc;
+
+out_err_desc_get:
+ while (i--)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+ kfree(cdesc);
+out_err:
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+ return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_cyclic_desc *cdesc = dwc->cdesc;
+ int i;
+
+ dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+ if (!cdesc)
+ return;
+
+ spin_lock_bh(&dwc->lock);
+
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+
+ dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+ spin_unlock_bh(&dwc->lock);
+
+ for (i = 0; i < cdesc->periods; i++)
+ dwc_desc_put(dwc, cdesc->desc[i]);
+
+ kfree(cdesc->desc);
+ kfree(cdesc);
+
+ clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
/*----------------------------------------------------------------------*/
static void dw_dma_off(struct dw_dma *dw)
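
The exported dw_dma_cyclic_* functions above form a self-contained cyclic
API for ring-buffer style transfers. A usage sketch under stated
assumptions (chan is a dw_dmac channel whose chan->private points at a
configured struct dw_dma_slave; my_period_cb, my_ctx, buf_dma, buf_len and
period_len are hypothetical):

	struct dw_cyclic_desc *cdesc;
	int ret;

	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
				   DMA_TO_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = my_period_cb;	/* called once per period */
	cdesc->period_callback_param = my_ctx;

	ret = dw_dma_cyclic_start(chan);	/* softirqs must be disabled */
	if (ret)
		goto err_free;

	/* ... hardware now loops over buf_len / period_len periods ... */

	dw_dma_cyclic_stop(chan);
err_free:
	dw_dma_cyclic_free(chan);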
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b202c5cf..13a580767031 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {
#define DW_REGLEN 0x400
+enum dw_dmac_flags {
+ DW_DMA_IS_CYCLIC = 0,
+};
+
struct dw_dma_chan {
struct dma_chan chan;
void __iomem *ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
spinlock_t lock;
/* these other elements are all protected by lock */
+ unsigned long flags;
dma_cookie_t completed;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
+ struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
};
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
return container_of(chan, struct dw_dma_chan, chan);
}
-
struct dw_dma {
struct dma_device dma;
void __iomem *regs;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 86d6da47f558..da8a8ed9e411 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
dma_async_tx_descriptor_init(&desc_sw->async_tx,
&fsl_chan->common);
desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
- INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
desc_sw->async_tx.phys = pdesc;
}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 5905cd36bcd2..e4fc33c1c32f 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
desc_sw->async_tx.tx_submit = ioat2_tx_submit;
break;
}
- INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
desc_sw->hw = desc;
desc_sw->async_tx.phys = phys;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 16adbe61cfb2..2f052265122f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
slot->async_tx.tx_submit = iop_adma_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
- INIT_LIST_HEAD(&slot->async_tx.tx_list);
hw_desc = (char *) iop_chan->device->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index ae50a9d1a4e6..90773844cc89 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -28,6 +28,9 @@
#define FS_VF_IN_VALID 0x00000002
#define FS_ENC_IN_VALID 0x00000001
+static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
+ bool wait_for_stop);
+
/*
* There can be only one, we could allocate it dynamically, but then we'd have
* to add an extra parameter to some functions, and use something as ugly as
@@ -107,7 +110,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
}
}
-/* Enable / disable direct write to memory by the Camera Sensor Interface */
+/* Enable direct write to memory by the Camera Sensor Interface */
static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
{
uint32_t ic_conf, mask;
@@ -126,6 +129,7 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
idmac_write_icreg(ipu, ic_conf, IC_CONF);
}
+/* Called under spin_lock_irqsave(&ipu_data.lock) */
static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
{
uint32_t ic_conf, mask;
@@ -422,7 +426,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
break;
default:
dev_err(ipu_data.dev,
- "mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
+ "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
break;
}
@@ -433,20 +437,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
uint16_t burst_pixels)
{
params->pp.npb = burst_pixels - 1;
-};
+}
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
params->pp.eba0 = buf0;
params->pp.eba1 = buf1;
-};
+}
static void ipu_ch_param_set_rotation(union chan_param_mem *params,
enum ipu_rotate_mode rotate)
{
params->pp.bam = rotate;
-};
+}
static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
uint32_t num_words)
@@ -571,7 +575,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch)
{
/* Channel Parameter Memory */
return 0x10000 | (dma_ch << 4);
-};
+}
static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
bool prio)
@@ -611,7 +615,8 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
/**
* ipu_enable_channel() - enable an IPU channel.
- * @channel: channel ID.
+ * @idmac: IPU DMAC context.
+ * @ichan: IDMAC channel.
* @return: 0 on success or negative error code on failure.
*/
static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
@@ -649,7 +654,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
/**
* ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @channel: channel ID.
+ * @ichan: IDMAC channel.
* @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
* @width: width of buffer in pixels.
* @height: height of buffer in pixels.
@@ -687,7 +692,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
}
/* IC channel's stride must be a multiple of 8 pixels */
- if ((channel <= 13) && (stride % 8)) {
+ if ((channel <= IDMAC_IC_13) && (stride % 8)) {
dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
return -EINVAL;
}
@@ -752,7 +757,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
/**
* ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @channel: channel ID.
+ * @ichan: IDMAC channel.
* @buffer_n: buffer number to update.
* 0 or 1 are the only valid values.
* @phyaddr: buffer physical address.
@@ -760,9 +765,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
* function will fail if the buffer is set to ready.
*/
/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(enum ipu_channel channel,
+static int ipu_update_channel_buffer(struct idmac_channel *ichan,
int buffer_n, dma_addr_t phyaddr)
{
+ enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
unsigned long flags;
@@ -771,8 +777,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
if (buffer_n == 0) {
reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
if (reg & (1UL << channel)) {
- spin_unlock_irqrestore(&ipu_data.lock, flags);
- return -EACCES;
+ ipu_ic_disable_task(&ipu_data, channel);
+ ichan->status = IPU_CHANNEL_READY;
}
/* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
@@ -782,8 +788,8 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
} else {
reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
if (reg & (1UL << channel)) {
- spin_unlock_irqrestore(&ipu_data.lock, flags);
- return -EACCES;
+ ipu_ic_disable_task(&ipu_data, channel);
+ ichan->status = IPU_CHANNEL_READY;
}
/* Check if double-buffering is already enabled */
@@ -805,6 +811,39 @@ static int ipu_update_channel_buffer(enum ipu_channel channel,
}
/* Called under spin_lock_irqsave(&ichan->lock) */
+static int ipu_submit_buffer(struct idmac_channel *ichan,
+ struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
+{
+ unsigned int chan_id = ichan->dma_chan.chan_id;
+ struct device *dev = &ichan->dma_chan.dev->device;
+ int ret;
+
+ if (async_tx_test_ack(&desc->txd))
+ return -EINTR;
+
+ /*
+ * On first invocation this shouldn't be necessary, the call to
+ * ipu_init_channel_buffer() above will set addresses for us, so we
+ * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
+ * doing it again shouldn't hurt either.
+ */
+ ret = ipu_update_channel_buffer(ichan, buf_idx,
+ sg_dma_address(sg));
+
+ if (ret < 0) {
+ dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
+ sg, chan_id, buf_idx);
+ return ret;
+ }
+
+ ipu_select_buffer(chan_id, buf_idx);
+ dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
+ sg, chan_id, buf_idx);
+
+ return 0;
+}
+
+/* Called under spin_lock_irqsave(&ichan->lock) */
static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
struct idmac_tx_desc *desc)
{
@@ -815,20 +854,10 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
if (!ichan->sg[i]) {
ichan->sg[i] = sg;
- /*
- * On first invocation this shouldn't be necessary, the
- * call to ipu_init_channel_buffer() above will set
- * addresses for us, so we could make it conditional
- * on status >= IPU_CHANNEL_ENABLED, but doing it again
- * shouldn't hurt either.
- */
- ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
- sg_dma_address(sg));
+ ret = ipu_submit_buffer(ichan, desc, sg, i);
if (ret < 0)
return ret;
- ipu_select_buffer(ichan->dma_chan.chan_id, i);
-
sg = sg_next(sg);
}
}
@@ -842,19 +871,22 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
struct idmac_channel *ichan = to_idmac_chan(tx->chan);
struct idmac *idmac = to_idmac(tx->chan->device);
struct ipu *ipu = to_ipu(idmac);
+ struct device *dev = &ichan->dma_chan.dev->device;
dma_cookie_t cookie;
unsigned long flags;
+ int ret;
/* Sanity check */
if (!list_empty(&desc->list)) {
/* The descriptor doesn't belong to client */
- dev_err(&ichan->dma_chan.dev->device,
- "Descriptor %p not prepared!\n", tx);
+ dev_err(dev, "Descriptor %p not prepared!\n", tx);
return -EBUSY;
}
mutex_lock(&ichan->chan_mutex);
+ async_tx_clear_ack(tx);
+
if (ichan->status < IPU_CHANNEL_READY) {
struct idmac_video_param *video = &ichan->params.video;
/*
@@ -878,16 +910,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
goto out;
}
- /* ipu->lock can be taken under ichan->lock, but not v.v. */
- spin_lock_irqsave(&ichan->lock, flags);
-
- /* submit_buffers() atomically verifies and fills empty sg slots */
- cookie = ipu_submit_channel_buffers(ichan, desc);
-
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- if (cookie < 0)
- goto out;
+ dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
cookie = ichan->dma_chan.cookie;
@@ -897,24 +920,40 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
/* from dmaengine.h: "last cookie value returned to client" */
ichan->dma_chan.cookie = cookie;
tx->cookie = cookie;
+
+ /* ipu->lock can be taken under ichan->lock, but not v.v. */
spin_lock_irqsave(&ichan->lock, flags);
+
list_add_tail(&desc->list, &ichan->queue);
+ /* submit_buffers() atomically verifies and fills empty sg slots */
+ ret = ipu_submit_channel_buffers(ichan, desc);
+
spin_unlock_irqrestore(&ichan->lock, flags);
+ if (ret < 0) {
+ cookie = ret;
+ goto dequeue;
+ }
+
if (ichan->status < IPU_CHANNEL_ENABLED) {
- int ret = ipu_enable_channel(idmac, ichan);
+ ret = ipu_enable_channel(idmac, ichan);
if (ret < 0) {
cookie = ret;
- spin_lock_irqsave(&ichan->lock, flags);
- list_del_init(&desc->list);
- spin_unlock_irqrestore(&ichan->lock, flags);
- tx->cookie = cookie;
- ichan->dma_chan.cookie = cookie;
+ goto dequeue;
}
}
dump_idmac_reg(ipu);
+dequeue:
+ if (cookie < 0) {
+ spin_lock_irqsave(&ichan->lock, flags);
+ list_del_init(&desc->list);
+ spin_unlock_irqrestore(&ichan->lock, flags);
+ tx->cookie = cookie;
+ ichan->dma_chan.cookie = cookie;
+ }
+
out:
mutex_unlock(&ichan->chan_mutex);
@@ -944,8 +983,6 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
memset(txd, 0, sizeof(*txd));
dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
txd->tx_submit = idmac_tx_submit;
- txd->chan = &ichan->dma_chan;
- INIT_LIST_HEAD(&txd->tx_list);
list_add(&desc->list, &ichan->free_list);
@@ -1161,6 +1198,24 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
return 0;
}
+static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
+ struct idmac_tx_desc **desc, struct scatterlist *sg)
+{
+ struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
+
+ if (sgnew)
+ /* next sg-element in this list */
+ return sgnew;
+
+ if ((*desc)->list.next == &ichan->queue)
+ /* No more descriptors on the queue */
+ return NULL;
+
+ /* Fetch next descriptor */
+ *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
+ return (*desc)->sg;
+}
+
/*
* We have several possibilities here:
* current BUF next BUF
@@ -1176,23 +1231,46 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
static irqreturn_t idmac_interrupt(int irq, void *dev_id)
{
struct idmac_channel *ichan = dev_id;
+ struct device *dev = &ichan->dma_chan.dev->device;
unsigned int chan_id = ichan->dma_chan.chan_id;
struct scatterlist **sg, *sgnext, *sgnew = NULL;
/* Next transfer descriptor */
- struct idmac_tx_desc *desc = NULL, *descnew;
+ struct idmac_tx_desc *desc, *descnew;
dma_async_tx_callback callback;
void *callback_param;
bool done = false;
- u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
- ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
- curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+ u32 ready0, ready1, curbuf, err;
+ unsigned long flags;
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
- pr_debug("IDMAC irq %d\n", irq);
+ dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
+
+ spin_lock_irqsave(&ipu_data.lock, flags);
+
+ ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
+ ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
+ curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+ err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
+
+ if (err & (1 << chan_id)) {
+ idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
+ spin_unlock_irqrestore(&ipu_data.lock, flags);
+ /*
+ * Doing this
+ * ichan->sg[0] = ichan->sg[1] = NULL;
+ * you can force channel re-enable on the next tx_submit(), but
+ * this is dirty - think about descriptors with multiple
+ * sg elements.
+ */
+ dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
+ chan_id, ready0, ready1, curbuf);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&ipu_data.lock, flags);
+
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
-
if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
int i = 100;
@@ -1207,19 +1285,23 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (!i) {
spin_unlock(&ichan->lock);
- dev_dbg(ichan->dma_chan.device->dev,
+ dev_dbg(dev,
"IRQ on active buffer on channel %x, active "
"%d, ready %x, %x, current %x!\n", chan_id,
ichan->active_buffer, ready0, ready1, curbuf);
return IRQ_NONE;
- }
+ } else
+ dev_dbg(dev,
+ "Buffer deactivated on channel %x, active "
+ "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+ ichan->active_buffer, ready0, ready1, curbuf, i);
}
if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
(!ichan->active_buffer && (ready0 >> chan_id) & 1)
)) {
spin_unlock(&ichan->lock);
- dev_dbg(ichan->dma_chan.device->dev,
+ dev_dbg(dev,
"IRQ with active buffer still ready on channel %x, "
"active %d, ready %x, %x!\n", chan_id,
ichan->active_buffer, ready0, ready1);
@@ -1227,8 +1309,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
}
if (unlikely(list_empty(&ichan->queue))) {
+ ichan->sg[ichan->active_buffer] = NULL;
spin_unlock(&ichan->lock);
- dev_err(ichan->dma_chan.device->dev,
+ dev_err(dev,
"IRQ without queued buffers on channel %x, active %d, "
"ready %x, %x!\n", chan_id,
ichan->active_buffer, ready0, ready1);
@@ -1243,40 +1326,44 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
sg = &ichan->sg[ichan->active_buffer];
sgnext = ichan->sg[!ichan->active_buffer];
+ if (!*sg) {
+ spin_unlock(&ichan->lock);
+ return IRQ_HANDLED;
+ }
+
+ desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
+ descnew = desc;
+
+ dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
+ irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
+
+ /* Find the descriptor of sgnext */
+ sgnew = idmac_sg_next(ichan, &descnew, *sg);
+ if (sgnext != sgnew)
+ dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
+
/*
* if sgnext == NULL sg must be the last element in a scatterlist and
* queue must be empty
*/
if (unlikely(!sgnext)) {
- if (unlikely(sg_next(*sg))) {
- dev_err(ichan->dma_chan.device->dev,
- "Broken buffer-update locking on channel %x!\n",
- chan_id);
- /* We'll let the user catch up */
+ if (!WARN_ON(sg_next(*sg)))
+ dev_dbg(dev, "Underrun on channel %x\n", chan_id);
+ ichan->sg[!ichan->active_buffer] = sgnew;
+
+ if (unlikely(sgnew)) {
+ ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
} else {
- /* Underrun */
+ spin_lock_irqsave(&ipu_data.lock, flags);
ipu_ic_disable_task(&ipu_data, chan_id);
- dev_dbg(ichan->dma_chan.device->dev,
- "Underrun on channel %x\n", chan_id);
+ spin_unlock_irqrestore(&ipu_data.lock, flags);
ichan->status = IPU_CHANNEL_READY;
/* Continue to check for complete descriptor */
}
}
- desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
-
- /* First calculate and submit the next sg element */
- if (likely(sgnext))
- sgnew = sg_next(sgnext);
-
- if (unlikely(!sgnew)) {
- /* Start a new scatterlist, if any queued */
- if (likely(desc->list.next != &ichan->queue)) {
- descnew = list_entry(desc->list.next,
- struct idmac_tx_desc, list);
- sgnew = &descnew->sg[0];
- }
- }
+ /* Calculate and submit the next sg element */
+ sgnew = idmac_sg_next(ichan, &descnew, sgnew);
if (unlikely(!sg_next(*sg)) || !sgnext) {
/*
@@ -1289,17 +1376,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
*sg = sgnew;
- if (likely(sgnew)) {
- int ret;
-
- ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
- sg_dma_address(*sg));
- if (ret < 0)
- dev_err(ichan->dma_chan.device->dev,
- "Failed to update buffer on channel %x buffer %d!\n",
- chan_id, ichan->active_buffer);
- else
- ipu_select_buffer(chan_id, ichan->active_buffer);
+ if (likely(sgnew) &&
+ ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+ spin_unlock(&ichan->lock);
+ callback(callback_param);
+ spin_lock(&ichan->lock);
}
/* Flip the active buffer - even if update above failed */
@@ -1327,13 +1410,20 @@ static void ipu_gc_tasklet(unsigned long arg)
struct idmac_channel *ichan = ipu->channel + i;
struct idmac_tx_desc *desc;
unsigned long flags;
- int j;
+ struct scatterlist *sg;
+ int j, k;
for (j = 0; j < ichan->n_tx_desc; j++) {
desc = ichan->desc + j;
spin_lock_irqsave(&ichan->lock, flags);
if (async_tx_test_ack(&desc->txd)) {
list_move(&desc->list, &ichan->free_list);
+ for_each_sg(desc->sg, sg, desc->sg_len, k) {
+ if (ichan->sg[0] == sg)
+ ichan->sg[0] = NULL;
+ else if (ichan->sg[1] == sg)
+ ichan->sg[1] = NULL;
+ }
async_tx_clear_ack(&desc->txd);
}
spin_unlock_irqrestore(&ichan->lock, flags);
@@ -1341,13 +1431,7 @@ static void ipu_gc_tasklet(unsigned long arg)
}
}
-/*
- * At the time .device_alloc_chan_resources() method is called, we cannot know,
- * whether the client will accept the channel. Thus we must only check, if we
- * can satisfy client's request but the only real criterion to verify, whether
- * the client has accepted our offer is the client_count. That's why we have to
- * perform the rest of our allocation tasks on the first call to this function.
- */
+/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long tx_flags)
@@ -1358,8 +1442,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
unsigned long flags;
/* We only can handle these three channels so far */
- if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
- ichan->dma_chan.chan_id != IDMAC_IC_7)
+ if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
+ chan->chan_id != IDMAC_IC_7)
return NULL;
if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
@@ -1400,7 +1484,7 @@ static void idmac_issue_pending(struct dma_chan *chan)
/* This is not always needed, but doesn't hurt either */
spin_lock_irqsave(&ipu->lock, flags);
- ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
+ ipu_select_buffer(chan->chan_id, ichan->active_buffer);
spin_unlock_irqrestore(&ipu->lock, flags);
/*
@@ -1432,8 +1516,7 @@ static void __idmac_terminate_all(struct dma_chan *chan)
struct idmac_tx_desc *desc = ichan->desc + i;
if (list_empty(&desc->list))
/* Descriptor was prepared, but not submitted */
- list_add(&desc->list,
- &ichan->free_list);
+ list_add(&desc->list, &ichan->free_list);
async_tx_clear_ack(&desc->txd);
}
@@ -1458,6 +1541,28 @@ static void idmac_terminate_all(struct dma_chan *chan)
mutex_unlock(&ichan->chan_mutex);
}
+#ifdef DEBUG
+static irqreturn_t ic_sof_irq(int irq, void *dev_id)
+{
+ struct idmac_channel *ichan = dev_id;
+ printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
+ irq, ichan->dma_chan.chan_id);
+ disable_irq(irq);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ic_eof_irq(int irq, void *dev_id)
+{
+ struct idmac_channel *ichan = dev_id;
+ printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
+ irq, ichan->dma_chan.chan_id);
+ disable_irq(irq);
+ return IRQ_HANDLED;
+}
+
+static int ic_sof = -EINVAL, ic_eof = -EINVAL;
+#endif
+
static int idmac_alloc_chan_resources(struct dma_chan *chan)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
@@ -1471,31 +1576,49 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
chan->cookie = 1;
ichan->completed = -ENXIO;
- ret = ipu_irq_map(ichan->dma_chan.chan_id);
+ ret = ipu_irq_map(chan->chan_id);
if (ret < 0)
goto eimap;
ichan->eof_irq = ret;
+
+ /*
+ * Important to first disable the channel, because maybe someone
+ * used it before us, e.g., the bootloader
+ */
+ ipu_disable_channel(idmac, ichan, true);
+
+ ret = ipu_init_channel(idmac, ichan);
+ if (ret < 0)
+ goto eichan;
+
ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
ichan->eof_name, ichan);
if (ret < 0)
goto erirq;
- ret = ipu_init_channel(idmac, ichan);
- if (ret < 0)
- goto eichan;
+#ifdef DEBUG
+ if (chan->chan_id == IDMAC_IC_7) {
+ ic_sof = ipu_irq_map(69);
+ if (ic_sof > 0)
+ request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+ ic_eof = ipu_irq_map(70);
+ if (ic_eof > 0)
+ request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+ }
+#endif
ichan->status = IPU_CHANNEL_INITIALIZED;
- dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
- ichan->dma_chan.chan_id, ichan->eof_irq);
+ dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
+ chan->chan_id, ichan->eof_irq);
return ret;
-eichan:
- free_irq(ichan->eof_irq, ichan);
erirq:
- ipu_irq_unmap(ichan->dma_chan.chan_id);
+ ipu_uninit_channel(idmac, ichan);
+eichan:
+ ipu_irq_unmap(chan->chan_id);
eimap:
return ret;
}
@@ -1510,8 +1633,22 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
__idmac_terminate_all(chan);
if (ichan->status > IPU_CHANNEL_FREE) {
+#ifdef DEBUG
+ if (chan->chan_id == IDMAC_IC_7) {
+ if (ic_sof > 0) {
+ free_irq(ic_sof, ichan);
+ ipu_irq_unmap(69);
+ ic_sof = -EINVAL;
+ }
+ if (ic_eof > 0) {
+ free_irq(ic_eof, ichan);
+ ipu_irq_unmap(70);
+ ic_eof = -EINVAL;
+ }
+ }
+#endif
free_irq(ichan->eof_irq, ichan);
- ipu_irq_unmap(ichan->dma_chan.chan_id);
+ ipu_irq_unmap(chan->chan_id);
}
ichan->status = IPU_CHANNEL_FREE;
@@ -1573,7 +1710,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
dma_chan->device = &idmac->dma;
dma_chan->cookie = 1;
dma_chan->chan_id = i;
- list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
+ list_add_tail(&dma_chan->device_node, &dma->channels);
}
idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
@@ -1581,7 +1718,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
return dma_async_device_register(&idmac->dma);
}
-static void ipu_idmac_exit(struct ipu *ipu)
+static void __exit ipu_idmac_exit(struct ipu *ipu)
{
int i;
struct idmac *idmac = &ipu->idmac;
@@ -1600,7 +1737,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
* IPU common probe / remove
*/
-static int ipu_probe(struct platform_device *pdev)
+static int __init ipu_probe(struct platform_device *pdev)
{
struct ipu_platform_data *pdata = pdev->dev.platform_data;
struct resource *mem_ipu, *mem_ic;
@@ -1700,7 +1837,7 @@ err_noirq:
return ret;
}
-static int ipu_remove(struct platform_device *pdev)
+static int __exit ipu_remove(struct platform_device *pdev)
{
struct ipu *ipu = platform_get_drvdata(pdev);
@@ -1725,7 +1862,7 @@ static struct platform_driver ipu_platform_driver = {
.name = "ipu-core",
.owner = THIS_MODULE,
},
- .remove = ipu_remove,
+ .remove = __exit_p(ipu_remove),
};
static int __init ipu_init(void)
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 83f532cc767f..dd8ebc75b667 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = {
};
/* Install the IRQ handler */
-int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
+int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
{
struct ipu_platform_data *pdata = dev->dev.platform_data;
unsigned int irq, irq_base, i;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index cb7f26fb9f18..ddab94f51224 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
- INIT_LIST_HEAD(&slot->async_tx.tx_list);
hw_desc = (char *) mv_chan->device->dma_desc_pool;
slot->async_tx.phys =
(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6dab63bdc4c1..6d21b9e48b89 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1105,7 +1105,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1024 * 1024,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
- DRM_INFO("MTRR allocation failed\n. Graphics "
+ DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17fa40858d26..d6cc9861e0a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -279,7 +279,6 @@ typedef struct drm_i915_private {
u8 saveAR_INDEX;
u8 saveAR[21];
u8 saveDACMASK;
- u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
struct {
@@ -457,6 +456,12 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
+
+ /**
+ * Used to check that the object doesn't appear more than once
+ * in an execbuffer object list.
+ */
+ int in_execbuffer;
};
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 85685bfd12da..37427e4016cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1476,7 +1476,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int regnum = obj_priv->fence_reg;
int tile_width;
- uint32_t val;
+ uint32_t fence_reg, val;
uint32_t pitch_val;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
@@ -1503,7 +1503,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ if (regnum < 8)
+ fence_reg = FENCE_REG_830_0 + (regnum * 4);
+ else
+ fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
+ I915_WRITE(fence_reg, val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -1557,7 +1561,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_i915_fence_reg *reg = NULL;
- int i, ret;
+ struct drm_i915_gem_object *old_obj_priv = NULL;
+ int i, ret, avail;
switch (obj_priv->tiling_mode) {
case I915_TILING_NONE:
@@ -1580,25 +1585,46 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
}
/* First try to find a free reg */
+try_again:
+ avail = 0;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
break;
+
+ old_obj_priv = reg->obj->driver_private;
+ if (!old_obj_priv->pin_count)
+ avail++;
}
/* None available, try to steal one or wait for a user to finish */
if (i == dev_priv->num_fence_regs) {
- struct drm_i915_gem_object *old_obj_priv = NULL;
+ uint32_t seqno = dev_priv->mm.next_gem_seqno;
loff_t offset;
-try_again:
- /* Could try to use LRU here instead... */
+ if (avail == 0)
+ return -ENOMEM;
+
for (i = dev_priv->fence_reg_start;
i < dev_priv->num_fence_regs; i++) {
+ uint32_t this_seqno;
+
reg = &dev_priv->fence_regs[i];
old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
+
+ if (old_obj_priv->pin_count)
+ continue;
+
+ /* i915 uses fences for GPU access to tiled buffers */
+ if (IS_I965G(dev) || !old_obj_priv->active)
break;
+
+ /* find the seqno of the first available fence */
+ this_seqno = old_obj_priv->last_rendering_seqno;
+ if (this_seqno != 0 &&
+ reg->obj->write_domain == 0 &&
+ i915_seqno_passed(seqno, this_seqno))
+ seqno = this_seqno;
}
/*
@@ -1606,15 +1632,25 @@ try_again:
* objects to finish before trying again.
*/
if (i == dev_priv->num_fence_regs) {
- ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
- if (ret) {
- WARN(ret != -ERESTARTSYS,
- "switch to GTT domain failed: %d\n", ret);
- return ret;
+ if (seqno == dev_priv->mm.next_gem_seqno) {
+ i915_gem_flush(dev,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev,
+ I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
}
+
+ ret = i915_wait_request(dev, seqno);
+ if (ret)
+ return ret;
goto try_again;
}
+ BUG_ON(old_obj_priv->active ||
+ (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
+
/*
* Zap this virtual mapping so we can set up a fence again
* for this object next time we need it.
@@ -1655,8 +1691,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
if (IS_I965G(dev))
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else
- I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+ else {
+ uint32_t fence_reg;
+
+ if (obj_priv->fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ else
+ fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
+ 8) * 4;
+
+ I915_WRITE(fence_reg, 0);
+ }
dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
@@ -2469,6 +2514,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
+ struct drm_i915_gem_object *obj_priv;
int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
@@ -2533,6 +2579,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = -EBADF;
goto err;
}
+
+ obj_priv = object_list[i]->driver_private;
+ if (obj_priv->in_execbuffer) {
+ DRM_ERROR("Object %p appears more than once in object list\n",
+ object_list[i]);
+ ret = -EBADF;
+ goto err;
+ }
+ obj_priv->in_execbuffer = true;
}
/* Pin and relocate */
@@ -2674,8 +2729,13 @@ err:
for (i = 0; i < pinned; i++)
i915_gem_object_unpin(object_list[i]);
- for (i = 0; i < args->buffer_count; i++)
+ for (i = 0; i < args->buffer_count; i++) {
+ if (object_list[i]) {
+ obj_priv = object_list[i]->driver_private;
+ obj_priv->in_execbuffer = false;
+ }
drm_gem_object_unreference(object_list[i]);
+ }
mutex_unlock(&dev->struct_mutex);
@@ -2712,17 +2772,24 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret != 0) {
if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to bind: %d", ret);
+ DRM_ERROR("Failure to bind: %d\n", ret);
+ return ret;
+ }
+ }
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle tiled surfaces.
+ */
+ if (!IS_I965G(dev) &&
+ obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence_reg(obj, true);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) &&
- obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_get_fence_reg(obj, true);
}
obj_priv->pin_count++;
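The in_execbuffer flag added above gives a cheap duplicate check: each object is marked while the list is scanned, a second occurrence is rejected with -EBADF, and the err: path clears every mark again. A minimal userspace sketch of the same pattern, using hypothetical stand-in types:

	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		int id;
		bool in_execbuffer;
	};

	/* Mirrors the duplicate check added to i915_gem_execbuffer():
	 * mark each object while scanning, reject one that is already
	 * marked, and always clear the marks afterwards. */
	static int check_list(struct obj **list, int n)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			if (list[i]->in_execbuffer) {
				fprintf(stderr, "object %d appears twice\n",
					list[i]->id);
				ret = -1;
				break;
			}
			list[i]->in_execbuffer = true;
		}

		for (i = 0; i < n; i++)
			list[i]->in_execbuffer = false;

		return ret;
	}

	int main(void)
	{
		struct obj a = { 1, false }, b = { 2, false };
		struct obj *list[] = { &a, &b, &a };	/* duplicate on purpose */

		return check_list(list, 3) ? 1 : 0;
	}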
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9d6539a868b3..90600d899413 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -184,6 +184,7 @@
* Fence registers
*/
#define FENCE_REG_830_0 0x2000
+#define FENCE_REG_945_8 0x3000
#define I830_FENCE_START_MASK 0x07f80000
#define I830_FENCE_TILING_Y_SHIFT 12
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
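Together with the regnum checks added in i915_gem.c above, the new FENCE_REG_945_8 base implies a split fence register file: fences 0-7 live at 0x2000 in 4-byte strides, fences 8-15 at 0x3000. A small sketch of the offset arithmetic, illustrative only:

	#include <stdio.h>

	#define FENCE_REG_830_0	0x2000	/* fences 0-7, from i915_reg.h */
	#define FENCE_REG_945_8	0x3000	/* fences 8-15, added by this patch */

	/* Same mapping as i915_write_fence_reg()/i915_gem_clear_fence_reg() */
	static unsigned int fence_reg_offset(int regnum)
	{
		if (regnum < 8)
			return FENCE_REG_830_0 + regnum * 4;
		return FENCE_REG_945_8 + (regnum - 8) * 4;
	}

	int main(void)
	{
		printf("fence 3  -> 0x%x\n", fence_reg_offset(3));	/* 0x200c */
		printf("fence 10 -> 0x%x\n", fence_reg_offset(10));	/* 0x3008 */
		return 0;
	}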
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5d84027ee8f3..d669cc2b42c0 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -119,11 +119,6 @@ static void i915_save_vga(struct drm_device *dev)
/* VGA color palette registers */
dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
- /* DACCRX automatically increments during read */
- I915_WRITE8(VGA_DACRX, 0);
- /* Read 3 bytes of color data from each index */
- for (i = 0; i < 256 * 3; i++)
- dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
@@ -225,12 +220,6 @@ static void i915_restore_vga(struct drm_device *dev)
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
- /* DACCRX automatically increments during read */
- I915_WRITE8(VGA_DACWX, 0);
- /* Read 3 bytes of color data from each index */
- for (i = 0; i < 256 * 3; i++)
- I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
-
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b84bf066879b..b4eea0292c1a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -543,8 +543,8 @@ config SENSORS_LM90
help
If you say yes here you get support for National Semiconductor LM90,
LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim
- MAX6646, MAX6647, MAX6649, MAX6657, MAX6658, MAX6659, MAX6680 and
- MAX6681 sensor chips.
+ MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
+ MAX6680, MAX6681 and MAX6692 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index e52b38806d03..ad2b3431b725 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -760,8 +760,11 @@ static int abituguru3_read_increment_offset(struct abituguru3_data *data,
for (i = 0; i < offset_count; i++)
if ((x = abituguru3_read(data, bank, offset + i, count,
- buf + i * count)) != count)
- return i * count + (i && (x < 0)) ? 0 : x;
+ buf + i * count)) != count) {
+ if (x < 0)
+ return x;
+ return i * count + x;
+ }
return i * count;
}
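The abituguru3 change above is an operator-precedence repair: in C, `?:` binds more loosely than `+`, so the old one-liner parsed as `(i * count + (i && (x < 0))) ? 0 : x`, returning 0 whenever i > 0 and losing both the byte count and the error code. A standalone demonstration with hypothetical values:

	#include <stdio.h>

	int main(void)
	{
		int i = 2, count = 32, x = -5;	/* hypothetical short-read result */

		/* Old expression: the whole sum becomes the ?: condition,
		 * so the result is 0 whenever i > 0. */
		int old = i * count + (i && (x < 0)) ? 0 : x;

		/* Behaviour of the fixed driver code above. */
		int fixed;

		if (x < 0)
			fixed = x;		/* propagate the error code */
		else
			fixed = i * count + x;	/* full blocks plus short read */

		printf("old = %d, fixed = %d\n", old, fixed);	/* 0 and -5 */
		return 0;
	}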
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 1692de369969..18a1ba888165 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -617,7 +617,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct f75375_data *data = i2c_get_clientdata(client);
+ struct f75375_data *data;
struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
int err;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 95a99c590da2..9157247fed8e 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -213,7 +213,7 @@ static inline u16 FAN16_TO_REG(long rpm)
#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\
((val)+500)/1000),-128,127))
-#define TEMP_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_FROM_REG(val) ((val) * 1000)
#define PWM_TO_REG(val) ((val) >> 1)
#define PWM_FROM_REG(val) (((val)&0x7f) << 1)
@@ -267,9 +267,9 @@ struct it87_data {
u8 has_fan; /* Bitfield, fans enabled */
u16 fan[5]; /* Register values, possibly combined */
u16 fan_min[5]; /* Register values, possibly combined */
- u8 temp[3]; /* Register value */
- u8 temp_high[3]; /* Register value */
- u8 temp_low[3]; /* Register value */
+ s8 temp[3]; /* Register value */
+ s8 temp_high[3]; /* Register value */
+ s8 temp_low[3]; /* Register value */
u8 sensor; /* Register value */
u8 fan_div[3]; /* Register encoding, shifted right */
u8 vid; /* Register encoding, combined */
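Dropping the manual sign extension from TEMP_FROM_REG is safe because the storage type changed with it: once the register byte is held in an s8, the ordinary integer promotion in `(val) * 1000` sign-extends by itself (and, unlike the old `> 0x80` test, it also handles 0x80 = -128 correctly). A standalone check, assuming an 8-bit two's-complement register value:

	#include <stdio.h>

	int main(void)
	{
		unsigned char raw = 0xF6;	/* hypothetical reading: -10 degC */

		/* Old scheme: unsigned storage, sign extension in the macro. */
		unsigned char u = raw;
		int old_mdeg = (u > 0x80 ? u - 0x100 : u) * 1000;

		/* New scheme: signed storage, promotion sign-extends. */
		signed char s = (signed char)raw;
		int new_mdeg = s * 1000;

		printf("old = %d, new = %d\n", old_mdeg, new_mdeg); /* both -10000 */
		return 0;
	}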
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index cfc1ee90f5a3..b251d8674b41 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -72,6 +72,7 @@ I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
#define LM85_COMPANY_SMSC 0x5c
#define LM85_VERSTEP_VMASK 0xf0
#define LM85_VERSTEP_GENERIC 0x60
+#define LM85_VERSTEP_GENERIC2 0x70
#define LM85_VERSTEP_LM85C 0x60
#define LM85_VERSTEP_LM85B 0x62
#define LM85_VERSTEP_ADM1027 0x60
@@ -334,6 +335,7 @@ static struct lm85_data *lm85_update_device(struct device *dev);
static const struct i2c_device_id lm85_id[] = {
{ "adm1027", adm1027 },
{ "adt7463", adt7463 },
+ { "adt7468", adt7468 },
{ "lm85", any_chip },
{ "lm85b", lm85b },
{ "lm85c", lm85c },
@@ -408,7 +410,8 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
struct lm85_data *data = lm85_update_device(dev);
int vid;
- if (data->type == adt7463 && (data->vid & 0x80)) {
+ if ((data->type == adt7463 || data->type == adt7468) &&
+ (data->vid & 0x80)) {
/* 6-pin VID (VRM 10) */
vid = vid_from_reg(data->vid & 0x3f, data->vrm);
} else {
@@ -1153,7 +1156,8 @@ static int lm85_detect(struct i2c_client *client, int kind,
address, company, verstep);
/* All supported chips have the version in common */
- if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC) {
+ if ((verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC &&
+ (verstep & LM85_VERSTEP_VMASK) != LM85_VERSTEP_GENERIC2) {
dev_dbg(&adapter->dev, "Autodetection failed: "
"unsupported version\n");
return -ENODEV;
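The detect path masks the version/step register with LM85_VERSTEP_VMASK and now accepts the 0x70 family as well, which is what lets the ADT7468 through autodetection. A tiny illustration with a hypothetical register value:

	#include <stdio.h>

	#define LM85_VERSTEP_VMASK	0xf0
	#define LM85_VERSTEP_GENERIC	0x60
	#define LM85_VERSTEP_GENERIC2	0x70	/* added for the ADT7468 */

	int main(void)
	{
		unsigned char verstep = 0x71;	/* hypothetical ADT7468 value */
		int ok = (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC ||
			 (verstep & LM85_VERSTEP_VMASK) == LM85_VERSTEP_GENERIC2;

		printf("verstep 0x%02x %s\n", verstep,
		       ok ? "matches" : "rejected");
		return 0;
	}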
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 96a701866726..1aff7575799d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -32,10 +32,10 @@
* supported by this driver. These chips lack the remote temperature
* offset feature.
*
- * This driver also supports the MAX6646, MAX6647 and MAX6649 chips
- * made by Maxim. These are again similar to the LM86, but they use
- * unsigned temperature values and can report temperatures from 0 to
- * 145 degrees.
+ * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
+ * MAX6692 chips made by Maxim. These are again similar to the LM86,
+ * but they use unsigned temperature values and can report temperatures
+ * from 0 to 145 degrees.
*
* This driver also supports the MAX6680 and MAX6681, two other sensor
* chips made by Maxim. These are quite similar to the other Maxim
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index a01b4488208b..4a65b96db2c8 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2490,12 +2490,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
int ret = 0;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
+ struct nes_ib_device *nesibdev;
nesvnic = to_nesvnic(nesqp->ibqp.device);
if (!nesvnic)
return -EINVAL;
nesdev = nesvnic->nesdev;
+ nesibdev = nesvnic->nesibdev;
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
atomic_read(&nesvnic->netdev->refcnt));
@@ -2507,6 +2509,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
} else {
/* Need to free the Last Streaming Mode Message */
if (nesqp->ietf_frame) {
+ if (nesqp->lsmm_mr)
+ nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
pci_free_consistent(nesdev->pcidev,
nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
nesqp->ietf_frame, nesqp->ietf_frame_pbase);
@@ -2543,6 +2547,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u32 crc_value;
int ret;
int passive_state;
+ struct nes_ib_device *nesibdev;
+ struct ib_mr *ibmr = NULL;
+ struct ib_phys_buf ibphysbuf;
+ struct nes_pd *nespd;
+
+
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -2601,6 +2611,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (cm_id->remote_addr.sin_addr.s_addr !=
cm_id->local_addr.sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
+ nesibdev = nesvnic->nesibdev;
+ nespd = nesqp->nespd;
+ ibphysbuf.addr = nesqp->ietf_frame_pbase;
+ ibphysbuf.size = conn_param->private_data_len +
+ sizeof(struct ietf_mpa_frame);
+ ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
+ &ibphysbuf, 1,
+ IB_ACCESS_LOCAL_WRITE,
+ (u64 *)&nesqp->ietf_frame);
+ if (!ibmr) {
+ nes_debug(NES_DBG_CM, "Unable to register memory region "
+ "for LSMM for cm_node = %p\n",
+ cm_node);
+ return -ENOMEM;
+ }
+
+ ibmr->pd = &nespd->ibpd;
+ ibmr->device = nespd->ibpd.device;
+ nesqp->lsmm_mr = ibmr;
+
u64temp |= NES_SW_CONTEXT_ALIGN>>1;
set_wqe_64bit_value(wqe->wqe_words,
NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
@@ -2611,14 +2641,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
cpu_to_le32(conn_param->private_data_len +
sizeof(struct ietf_mpa_frame));
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
- cpu_to_le32((u32)nesqp->ietf_frame_pbase);
- wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
- cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ (u64)nesqp->ietf_frame);
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
cpu_to_le32(conn_param->private_data_len +
sizeof(struct ietf_mpa_frame));
- wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 4fdb72454f94..d93a6562817c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1360,8 +1360,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
+ if (!udata) {
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
+ }
nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
u64temp = (u64)nesqp->hwqp.sq_pbase;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 6c6b4da5184f..ae0ca9bc83bd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -134,6 +134,7 @@ struct nes_qp {
struct ietf_mpa_frame *ietf_frame;
dma_addr_t ietf_frame_pbase;
wait_queue_head_t state_waitq;
+ struct ib_mr *lsmm_mr;
unsigned long socket;
struct nes_hw_qp hwqp;
struct work_struct work;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2be574c0a27a..ed5727c089a9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -269,12 +269,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
return mddev;
}
-static void mddev_delayed_delete(struct work_struct *ws)
-{
- mddev_t *mddev = container_of(ws, mddev_t, del_work);
- kobject_del(&mddev->kobj);
- kobject_put(&mddev->kobj);
-}
+static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
@@ -3811,6 +3806,21 @@ static struct kobj_type md_ktype = {
int mdp_major = 0;
+static void mddev_delayed_delete(struct work_struct *ws)
+{
+ mddev_t *mddev = container_of(ws, mddev_t, del_work);
+
+ if (mddev->private == &md_redundancy_group) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ mddev->private = NULL;
+ }
+ kobject_del(&mddev->kobj);
+ kobject_put(&mddev->kobj);
+}
+
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
@@ -4313,13 +4323,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
- if (mddev->pers->sync_request) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- }
module_put(mddev->pers->owner);
+ if (mddev->pers->sync_request)
+ mddev->private = &md_redundancy_group;
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent(mddev->sysfs_state);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 84d5ea1ec171..b457a05b28d9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -1383,6 +1383,11 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
wm8350->power.rev_g_coeff = 1;
break;
+ case 1:
+ dev_info(wm8350->dev, "WM8351 Rev B\n");
+ wm8350->power.rev_g_coeff = 1;
+ break;
+
default:
dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n");
ret = -ENODEV;
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index f4a67c65d301..2db166b7096f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -793,8 +793,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
host->mem->start + host->sdidata);
if (!setup_ok) {
- s3c2410_dma_config(host->dma, 4,
- (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
+ s3c2410_dma_config(host->dma, 4, 0);
s3c2410_dma_set_buffdone_fn(host->dma,
s3cmci_dma_done_callback);
s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d44f741ae229..6d9f810565c8 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -821,7 +821,8 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
if (!(info->flags & IS_POW2PS))
return info;
}
- }
+ } else
+ return info;
}
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 4b122e7ab4b3..229718222db7 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -46,16 +46,19 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
+ if (info->cmtd) {
#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts) {
- del_mtd_partitions(info->cmtd);
- kfree(info->parts);
- } else if (physmap_data->nr_parts)
- del_mtd_partitions(info->cmtd);
- else
- del_mtd_device(info->cmtd);
+ if (info->nr_parts || physmap_data->nr_parts)
+ del_mtd_partitions(info->cmtd);
+ else
+ del_mtd_device(info->cmtd);
#else
- del_mtd_device(info->cmtd);
+ del_mtd_device(info->cmtd);
+#endif
+ }
+#ifdef CONFIG_MTD_PARTITIONS
+ if (info->nr_parts)
+ kfree(info->parts);
#endif
#ifdef CONFIG_MTD_CONCAT
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index d4fb4acdbebd..4e9bd380a5c2 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2649,8 +2649,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
int err = -ENODEV;
sbus_dp = to_of_device(op->dev.parent)->node;
- if (is_qfe)
- sbus_dp = to_of_device(op->dev.parent->parent)->node;
/* We can match PCI devices too; do not accept those here. */
if (strcmp(sbus_dp->name, "sbus"))
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index eacfb13998bb..9aa4fe100a0d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -143,7 +143,7 @@ config HOTPLUG_PCI_SHPC
config HOTPLUG_PCI_RPA
tristate "RPA PCI Hotplug driver"
- depends on PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
+ depends on PPC_PSERIES && EEH && !HOTPLUG_PCI_FAKE
help
Say Y here if you have an RPA system that supports PCI Hotplug.
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index d0c973685868..382575007382 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -133,6 +133,9 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
bool enable)
{
set_device_error_reporting(dev, &enable);
+
+ if (!dev->subordinate)
+ return;
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 248b4db91552..5ea566e20b37 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -103,6 +103,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
static void pcie_portdrv_remove (struct pci_dev *dev)
{
pcie_port_device_remove(dev);
+ pci_disable_device(dev);
kfree(pci_get_drvdata(dev));
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f20d55368edb..92b9efe9bcaf 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -23,6 +23,7 @@
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
#include "pci.h"
int isa_dma_bridge_buggy;
@@ -1749,6 +1750,30 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
+/*
+ * The 82575 and 82598 may experience data corruption issues when transitioning
+ * out of L0s. To prevent this, we need to disable L0s on the PCIe link.
+ */
+static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "Disabling L0s\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+
static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
{
/* rev 1 ncr53c810 chips don't set the class at all which means
@@ -2097,7 +2122,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
- dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+ dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags & ~HT_MSI_FLAGS_ENABLE);
@@ -2141,6 +2166,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
int pos;
int found;
+ /* Enabling HT MSI mapping on this device breaks MCP51 */
+ if (dev->device == 0x270)
+ return;
+
/* check if there is HT MSI cap or enabled on this device */
found = ht_check_msi_mapping(dev);
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 94c9f911824e..6bcca616a704 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1297,7 +1297,7 @@ static int __init acer_wmi_init(void)
set_quirks();
- if (!acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
+ if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
interface->capability &= ~ACER_CAP_BRIGHTNESS;
printk(ACER_INFO "Brightness must be controlled by "
"generic video driver\n");
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 1d768928e0bb..a52d4a11652d 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -180,10 +180,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
di->empty_uAh = battery_interpolate(scale, di->temp_C / 10);
di->empty_uAh *= 1000; /* convert to µAh */
- /* From Maxim Application Note 131: remaining capacity =
- * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
- di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
- (di->full_active_uAh - di->empty_uAh);
+ if (di->full_active_uAh == di->empty_uAh)
+ di->rem_capacity = 0;
+ else
+ /* From Maxim Application Note 131: remaining capacity =
+ * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
+ di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
+ (di->full_active_uAh - di->empty_uAh);
if (di->rem_capacity < 0)
di->rem_capacity = 0;
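With the guard in place, the AN131 percentage is only computed when the interpolated full and empty capacities actually differ. A worked standalone example with hypothetical readings:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical values, all in uAh */
		long accum_current_uAh = 700000;	/* ICA */
		long empty_uAh = 100000;
		long full_active_uAh = 1100000;
		long rem_capacity;

		if (full_active_uAh == empty_uAh)
			rem_capacity = 0;	/* guard added by the patch */
		else
			/* AN131: ((ICA - Empty) / (Full - Empty)) x 100% */
			rem_capacity = ((accum_current_uAh - empty_uAh) * 100L) /
				       (full_active_uAh - empty_uAh);

		if (rem_capacity < 0)
			rem_capacity = 0;

		printf("remaining capacity = %ld%%\n", rem_capacity);
		/* prints: remaining capacity = 60% */
		return 0;
	}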
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index f08e169ba1b5..7e30e5f6e032 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -129,7 +129,7 @@ static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status)
bp->waiting = 1;
add_wait_queue(&bp->wq, &wait);
while (limit-- > 0) {
- unsigned long val;
+ long val;
val = wait_event_interruptible_timeout(
bp->wq,
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a9a9893a5f95..e6d1fc8c54f1 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -38,9 +38,6 @@
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
-
-#define MAJOR_NR JSFD_MAJOR
-
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 2181ce4d7ebd..35e8eb02b9e9 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1853,13 +1853,14 @@ static void aty128_bl_exit(struct backlight_device *bd)
* Initialisation
*/
-#ifdef CONFIG_PPC_PMAC
+#ifdef CONFIG_PPC_PMAC__disabled
static void aty128_early_resume(void *data)
{
struct aty128fb_par *par = data;
if (try_acquire_console_sem())
return;
+ pci_restore_state(par->pdev);
aty128_do_resume(par->pdev);
release_console_sem();
}
@@ -1907,7 +1908,14 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
/* Indicate sleep capability */
if (par->chip_gen == rage_M3) {
pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1);
+#if 0 /* Disable the early video resume hack for now as it's causing problems:
+ * among other things, we now rely on the PCI core restoring the config space
+ * for us, which isn't the case with that hack, and that code path causes
+ * various things to be called with interrupts off while they shouldn't be.
+ * I'm leaving the code in as it can be useful for debugging purposes.
+ */
pmac_set_early_video_resume(aty128_early_resume, par);
+#endif
}
/* Find default mode */
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index ca5f0dc28546..81603f85e17e 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2762,12 +2762,13 @@ int radeonfb_pci_resume(struct pci_dev *pdev)
return rc;
}
-#ifdef CONFIG_PPC_OF
+#ifdef CONFIG_PPC_OF__disabled
static void radeonfb_early_resume(void *data)
{
struct radeonfb_info *rinfo = data;
rinfo->no_schedule = 1;
+ pci_restore_state(rinfo->pdev);
radeonfb_pci_resume(rinfo->pdev);
rinfo->no_schedule = 0;
}
@@ -2834,7 +2835,14 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
*/
if (rinfo->pm_mode != radeon_pm_none) {
pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, rinfo->of_node, 0, 1);
+#if 0 /* Disable the early video resume hack for now as it's causing problems:
+ * among other things, we now rely on the PCI core restoring the config space
+ * for us, which isn't the case with that hack, and that code path causes
+ * various things to be called with interrupts off while they shouldn't be.
+ * I'm leaving the code in as it can be useful for debugging purposes.
+ */
pmac_set_early_video_resume(radeonfb_early_resume, rinfo);
+#endif
}
#if 0
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index a24e680d2b9c..2e940199fc89 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -993,6 +993,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
struct i810fb_par *par = info->par;
int line_length, vidmem, mode_valid = 0, retval = 0;
u32 vyres = var->yres_virtual, vxres = var->xres_virtual;
+
/*
* Memory limit
*/
@@ -1002,12 +1003,12 @@ static int i810_check_params(struct fb_var_screeninfo *var,
if (vidmem > par->fb.size) {
vyres = par->fb.size/line_length;
if (vyres < var->yres) {
- vyres = yres;
+ vyres = info->var.yres;
vxres = par->fb.size/vyres;
vxres /= var->bits_per_pixel >> 3;
line_length = get_line_length(par, vxres,
var->bits_per_pixel);
- vidmem = line_length * yres;
+ vidmem = line_length * info->var.yres;
if (vxres < var->xres) {
printk("i810fb: required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 0e2b8fd24df1..2c5d069e5f06 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -446,7 +446,6 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
{
struct sh_mobile_lcdc_chan *ch;
struct sh_mobile_lcdc_board_cfg *board_cfg;
- unsigned long tmp;
int k;
/* tell the board code to disable the panel */
@@ -456,9 +455,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
if (board_cfg->display_off)
board_cfg->display_off(board_cfg->board_data);
- /* cleanup deferred io if SYS bus */
- tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
- if (ch->ldmt1r_value & (1 << 12) && tmp) {
+ /* cleanup deferred io if enabled */
+ if (ch->info.fbdefio) {
fb_deferred_io_cleanup(&ch->info);
ch->info.fbdefio = NULL;
}
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 9e1138a75e8b..a411702413d6 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -39,7 +39,7 @@ static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
- return gpio_get_value(pdata->pin);
+ return gpio_get_value(pdata->pin) ? 1 : 0;
}
static int __init w1_gpio_probe(struct platform_device *pdev)
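The `? 1 : 0` added above matters because gpio_get_value() only promises zero or nonzero, often the raw port register bit, while the w1 framework treats the return value as a literal bit. A minimal illustration with a hypothetical stand-in for the GPIO read:

	#include <stdio.h>

	/* Hypothetical stand-in: a high line reported as the raw
	 * port bit rather than as 1. */
	static int fake_gpio_get_value(void)
	{
		return 0x40;
	}

	int main(void)
	{
		int raw = fake_gpio_get_value();
		unsigned char bit = raw ? 1 : 0;	/* what w1 expects */

		printf("raw = 0x%x, bit = %u\n", raw, bit);	/* 0x40 -> 1 */
		return 0;
	}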