author     Daniel Vetter <daniel.vetter@ffwll.ch>    2017-02-01 10:58:11 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2017-02-01 10:58:11 +0100
commit     e2b06d71bd3f50ac7098b3f2492ea95470b713db (patch)
tree       107c48a9d0782279fde07b6fda44994b96b9ac36 /drivers
parent     1692cd60d999b00a0491692dab0286e6011abd36 (diff)
parent     18566acac18f5784347bc5fe636a26897d1c963b (diff)
Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Chris Wilson wants the new fence tracepoint added in

    commit 8c96c678011eeb1676da18f203e90dea7e0d69d2
    Author: Chris Wilson <chris@chris-wilson.co.uk>
    Date:   Tue Jan 24 11:57:58 2017 +0000

        dma/fence: Export enable-signaling tracepoint for emission by drivers
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers')
539 files changed, 11292 insertions, 10161 deletions
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig index 4ef4c5caed4f..8a8e403644d6 100644 --- a/drivers/auxdisplay/Kconfig +++ b/drivers/auxdisplay/Kconfig @@ -132,9 +132,9 @@ config HT16K33 tristate "Holtek Ht16K33 LED controller with keyscan" depends on FB && OF && I2C && INPUT select FB_SYS_FOPS - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT select INPUT_MATRIXKMAP select FB_BACKLIGHT help diff --git a/drivers/base/base.h b/drivers/base/base.h index ada9dce34e6d..e19b1008e5fb 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -141,8 +141,6 @@ extern void device_unblock_probing(void); extern struct kset *devices_kset; extern void devices_kset_move_last(struct device *dev); -extern struct device_attribute dev_attr_deferred_probe; - #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) extern void module_add_driver(struct module *mod, struct device_driver *drv); extern void module_remove_driver(struct device_driver *drv); diff --git a/drivers/base/core.c b/drivers/base/core.c index 020ea7f05520..8c25e68e67d7 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev) goto err_remove_dev_groups; } - error = device_create_file(dev, &dev_attr_deferred_probe); - if (error) - goto err_remove_online; - return 0; - err_remove_online: - device_remove_file(dev, &dev_attr_online); err_remove_dev_groups: device_remove_groups(dev, dev->groups); err_remove_type_groups: @@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev) struct class *class = dev->class; const struct device_type *type = dev->type; - device_remove_file(dev, &dev_attr_deferred_probe); device_remove_file(dev, &dev_attr_online); device_remove_groups(dev, dev->groups); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index a8b258e5407b..a1fbf55c4d3a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static atomic_t deferred_trigger_count = ATOMIC_INIT(0); -static ssize_t deferred_probe_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - bool value; - - mutex_lock(&deferred_probe_mutex); - value = !list_empty(&dev->p->deferred_probe); - mutex_unlock(&deferred_probe_mutex); - - return sprintf(buf, "%d\n", value); -} -DEVICE_ATTR_RO(deferred_probe); - /* * In some cases, like suspend to RAM or hibernation, It might be reasonable * to prohibit probing of devices as it could be unsafe. 
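The base/dd.c hunk above removes the short-lived deferred_probe sysfs attribute. For reference, a consolidated kernel-context sketch of the read-only attribute pattern that code used (show callback, DEVICE_ATTR_RO, registration); this is not standalone-runnable, and the foo naming is illustrative only:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Kernel-context sketch of the read-only sysfs attribute pattern. */
    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 1);
    }
    static DEVICE_ATTR_RO(foo);     /* defines dev_attr_foo */

    static int foo_register(struct device *dev)
    {
            /* Pairs with device_remove_file() in the teardown path. */
            return device_create_file(dev, &dev_attr_foo);
    }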
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 8ab8ea1253e6..dacb6a8418aa 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev, sprintf(buf, "%s", zone->name); /* MMOP_ONLINE_KERNEL */ - zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL); + zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift); if (zone_shift) { strcat(buf, " "); strcat(buf, (zone + zone_shift)->name); } /* MMOP_ONLINE_MOVABLE */ - zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE); + zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift); if (zone_shift) { strcat(buf, " "); strcat(buf, (zone + zone_shift)->name); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 38c576f76d36..9fd06eeb1a17 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index, static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) { struct request *req = blk_mq_rq_from_pdu(cmd); - int result, flags; + int result; struct nbd_request request; unsigned long size = blk_rq_bytes(req); struct bio *bio; @@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) if (type != NBD_CMD_WRITE) return 0; - flags = 0; bio = req->bio; while (bio) { struct bio *next = bio->bi_next; @@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) bio_for_each_segment(bvec, bio, iter) { bool is_last = !next && bio_iter_last(bvec, iter); + int flags = is_last ? 0 : MSG_MORE; - if (is_last) - flags = MSG_MORE; dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", cmd, bvec.bv_len); result = sock_send_bvec(nbd, index, &bvec, flags); @@ -1042,6 +1040,7 @@ static int __init nbd_init(void) return -ENOMEM; for (i = 0; i < nbds_max; i++) { + struct request_queue *q; struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) goto out; @@ -1067,12 +1066,13 @@ static int __init nbd_init(void) * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. 
*/ - disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set); - if (!disk->queue) { + q = blk_mq_init_queue(&nbd_dev[i].tag_set); + if (IS_ERR(q)) { blk_mq_free_tag_set(&nbd_dev[i].tag_set); put_disk(disk); goto out; } + disk->queue = q; /* * Tell the block layer that we are not a rotational device diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5545a679abd8..10332c24f961 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -56,6 +56,7 @@ struct virtblk_req { struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; u8 status; + u8 sense[SCSI_SENSE_BUFFERSIZE]; struct scatterlist sg[]; }; @@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq, } if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { - sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); + memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); + sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE); sgs[num_out + num_in++] = &sense; sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); sgs[num_out + num_in++] = &inhdr; @@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev) if (err) goto out_put_disk; - q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); + q = blk_mq_init_queue(&vblk->tag_set); if (IS_ERR(q)) { err = -ENOMEM; goto out_free_tags; } + vblk->disk->queue = q; q->queuedata = vblk; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 15f58ab44d0b..e5ab7d9e8c45 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -25,6 +25,7 @@ #include <linux/genhd.h> #include <linux/highmem.h> #include <linux/slab.h> +#include <linux/backing-dev.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/err.h> @@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec) return bvec->bv_len != PAGE_SIZE; } +static void zram_revalidate_disk(struct zram *zram) +{ + revalidate_disk(zram->disk); + /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */ + zram->disk->queue->backing_dev_info.capabilities |= + BDI_CAP_STABLE_WRITES; +} + /* * Check if request is within bounds and aligned on zram logical blocks. */ @@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev, zram->comp = comp; zram->disksize = disksize; set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); + zram_revalidate_disk(zram); up_write(&zram->init_lock); - /* - * Revalidate disk out of the init_lock to avoid lockdep splat. - * It's okay because disk's capacity is protected by init_lock - * so that revalidate_disk always sees up-to-date capacity. 
- */ - revalidate_disk(zram->disk); - return len; out_destroy_comp: @@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev, /* Make sure all the pending I/O are finished */ fsync_bdev(bdev); zram_reset_device(zram); - revalidate_disk(zram->disk); + zram_revalidate_disk(zram); bdput(bdev); mutex_lock(&bdev->bd_mutex); diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 5bb1985ec484..6d9cc2d39d22 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf, char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ int err = 0; - if (!pfn_valid(PFN_DOWN(p))) - return -EIO; - read = 0; if (p < (unsigned long) high_memory) { low_count = count; @@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, * by the kernel or data corruption may occur */ kbuf = xlate_dev_kmem_ptr((void *)p); + if (!virt_addr_valid(kbuf)) + return -ENXIO; if (copy_to_user(buf, kbuf, sz)) return -EFAULT; @@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, * corruption may occur. */ ptr = xlate_dev_kmem_ptr((void *)p); + if (!virt_addr_valid(ptr)) + return -ENXIO; copied = copy_from_user(ptr, buf, sz); if (copied) { @@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; - if (!pfn_valid(PFN_DOWN(p))) - return -EIO; - if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 02819e0703c8..87885d146dbb 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp) struct pardevice *pdev = NULL; char *name; struct pardev_cb ppdev_cb; + int rc = 0; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) @@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp) port = parport_find_number(minor); if (!port) { pr_warn("%s: no associated port!\n", name); - kfree(name); - return -ENXIO; + rc = -ENXIO; + goto err; } memset(&ppdev_cb, 0, sizeof(ppdev_cb)); @@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp) ppdev_cb.private = pp; pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); parport_put_port(port); - kfree(name); if (!pdev) { pr_warn("%s: failed to register device!\n", name); - return -ENXIO; + rc = -ENXIO; + goto err; } pp->pdev = pdev; dev_dbg(&pdev->dev, "registered pardevice\n"); - return 0; +err: + kfree(name); + return rc; } static enum ieee1284_phase init_phase(int mode) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 8b00e79c2683..17857beb4892 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 8c8b495cbf0d..cdc092a1d9ef 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] 
__initconst = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", - GATE_BUS_TOP, 27, 0, 0), + GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), }; static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { @@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0), GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys", - GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0), + GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0), GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2", GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d", GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d", - GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0), GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", GATE_BUS_TOP, 5, 0, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", - GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", @@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", - GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), + GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0), GATE(0, "aclk400_isp", "mout_user_aclk400_isp", GATE_BUS_TOP, 16, 0, 0), GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", GATE_BUS_TOP, 17, 0, 0), GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", - GATE_BUS_TOP, 18, 0, 0), + GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0), GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", GATE_BUS_TOP, 28, 0, 0), GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", GATE_BUS_TOP, 29, 0, 0), GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", - SRC_MASK_TOP2, 24, 0, 0), + SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0), GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, 0, 0), diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 4da1dc2278bd..670ff0f25b67 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) if (mct_int_type == MCT_INT_SPI) { if (evt->irq != -1) disable_irq_nosync(evt->irq); + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index a1bfc098ea10..d1f1f456f5c4 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -28,6 +28,7 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on); EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); +EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); /* * fence context counter: each execution context should have its own diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index e00c9b022964..5a37b9fcf40d 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -24,5 +24,5 @@ config DW_DMAC_PCI select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the - platfroms that enumerate it as a 
PCI device. For example, + platforms that enumerate it as a PCI device. For example, Intel Medfield has integrated this GPDMA controller. diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 8e67895bcca3..abcc51b343ce 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -64,6 +64,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e #define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f +#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 90eddd9f07e4..cc5259b881d4 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) }, + /* I/OAT v3.3 platforms */ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, @@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev) } } +static inline bool is_skx_ioat(struct pci_dev *pdev) +{ + return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false; +} + static bool is_xeon_cb32(struct pci_dev *pdev) { return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || - is_hsw_ioat(pdev) || is_bdx_ioat(pdev); + is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev); } bool is_bwd_ioat(struct pci_dev *pdev) @@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ ioat_chan->completion = dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool, - GFP_KERNEL, &ioat_chan->completion_dma); + GFP_NOWAIT, &ioat_chan->completion_dma); if (!ioat_chan->completion) return -ENOMEM; @@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); order = IOAT_MAX_ORDER; - ring = ioat_alloc_ring(c, order, GFP_KERNEL); + ring = ioat_alloc_ring(c, order, GFP_NOWAIT); if (!ring) return -ENOMEM; @@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) device->version = readb(device->reg_base + IOAT_VER_OFFSET); if (device->version >= IOAT_VER_3_0) { + if (is_skx_ioat(pdev)) + device->version = IOAT_VER_3_2; err = ioat3_dma_probe(device, ioat_dca_enabled); if (device->version >= IOAT_VER_3_3) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ac68666cd3f4..daf479cce691 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( d->ccr |= CCR_DST_AMODE_POSTINC; if (port_window) { d->ccr |= CCR_SRC_AMODE_DBLIDX; - d->ei = 1; - /* - * One frame covers the port_window and by configure - * the source frame index to be -1 * (port_window - 1) - * we instruct the sDMA that after a frame is processed - * it should move back to the start of the window. 
- */ - d->fi = -(port_window_bytes - 1); if (port_window_bytes >= 64) - d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; + d->csdp |= CSDP_SRC_BURST_64; else if (port_window_bytes >= 32) - d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED; + d->csdp |= CSDP_SRC_BURST_32; else if (port_window_bytes >= 16) - d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED; + d->csdp |= CSDP_SRC_BURST_16; + } else { d->ccr |= CCR_SRC_AMODE_CONSTANT; } @@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( d->ccr |= CCR_SRC_AMODE_POSTINC; if (port_window) { d->ccr |= CCR_DST_AMODE_DBLIDX; + d->ei = 1; + /* + * One frame covers the port_window and by configure + * the source frame index to be -1 * (port_window - 1) + * we instruct the sDMA that after a frame is processed + * it should move back to the start of the window. + */ + d->fi = -(port_window_bytes - 1); if (port_window_bytes >= 64) - d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; + d->csdp |= CSDP_DST_BURST_64; else if (port_window_bytes >= 32) - d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED; + d->csdp |= CSDP_DST_BURST_32; else if (port_window_bytes >= 16) - d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED; + d->csdp |= CSDP_DST_BURST_16; } else { d->ccr |= CCR_DST_AMODE_CONSTANT; } @@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( osg->addr = sg_dma_address(sgent); osg->en = en; osg->fn = sg_dma_len(sgent) / frame_bytes; - if (port_window && dir == DMA_MEM_TO_DEV) { + if (port_window && dir == DMA_DEV_TO_MEM) { osg->ei = 1; /* * One frame covers the port_window and by configure @@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev) struct omap_dmadev *od; struct resource *res; int rc, i, irq; + u32 lch_count; od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); if (!od) @@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev) spin_lock_init(&od->lock); spin_lock_init(&od->irq_lock); - if (!pdev->dev.of_node) { - od->dma_requests = od->plat->dma_attr->lch_count; - if (unlikely(!od->dma_requests)) - od->dma_requests = OMAP_SDMA_REQUESTS; - } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests", - &od->dma_requests)) { + /* Number of DMA requests */ + od->dma_requests = OMAP_SDMA_REQUESTS; + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-requests", + &od->dma_requests)) { dev_info(&pdev->dev, "Missing dma-requests property, using %u.\n", OMAP_SDMA_REQUESTS); - od->dma_requests = OMAP_SDMA_REQUESTS; } - od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests, - sizeof(*od->lch_map), GFP_KERNEL); + /* Number of available logical channels */ + if (!pdev->dev.of_node) { + lch_count = od->plat->dma_attr->lch_count; + if (unlikely(!lch_count)) + lch_count = OMAP_SDMA_CHANNELS; + } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels", + &lch_count)) { + dev_info(&pdev->dev, + "Missing dma-channels property, using %u.\n", + OMAP_SDMA_CHANNELS); + lch_count = OMAP_SDMA_CHANNELS; + } + + od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map), + GFP_KERNEL); if (!od->lch_map) return -ENOMEM; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 87fd01539fcb..740bbb942594 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -448,6 +448,9 @@ struct dma_pl330_chan { /* for cyclic capability */ bool cyclic; + + /* for runtime pm tracking */ + bool active; }; struct pl330_dmac { @@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data) _stop(pch->thread); 
spin_unlock(&pch->thread->dmac->lock); power_down = true; + pch->active = false; } else { /* Make sure the PL330 Channel thread is active */ spin_lock(&pch->thread->dmac->lock); @@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data) desc->status = PREP; list_move_tail(&desc->node, &pch->work_list); if (power_down) { + pch->active = true; spin_lock(&pch->thread->dmac->lock); _start(pch->thread); spin_unlock(&pch->thread->dmac->lock); @@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan) unsigned long flags; struct pl330_dmac *pl330 = pch->dmac; LIST_HEAD(list); + bool power_down = false; pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); @@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan) pch->thread->req[0].desc = NULL; pch->thread->req[1].desc = NULL; pch->thread->req_running = -1; + power_down = pch->active; + pch->active = false; /* Mark all desc done */ list_for_each_entry(desc, &pch->submitted_list, node) { @@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan) list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); pm_runtime_mark_last_busy(pl330->ddma.dev); + if (power_down) + pm_runtime_put_autosuspend(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); return 0; @@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan) * updated on work_list emptiness status. */ WARN_ON(list_empty(&pch->submitted_list)); + pch->active = true; pm_runtime_get_sync(pch->dmac->ddma.dev); } list_splice_tail_init(&pch->submitted_list, &pch->work_list); diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2e441d0ccd79..4c357d475465 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct rcar_dmac *dmac = to_rcar_dmac(chan->device); + struct rcar_dmac_chan_map *map = &rchan->map; struct rcar_dmac_desc_page *page, *_page; struct rcar_dmac_desc *desc; LIST_HEAD(list); @@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) free_page((unsigned long)page); } + /* Remove slave mapping if present. 
*/ + if (map->slave.xfer_size) { + dma_unmap_resource(chan->device->dev, map->addr, + map->slave.xfer_size, map->dir, 0); + map->slave.xfer_size = 0; + } + pm_runtime_put(chan->device->dev); } diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 3688d0873a3e..3056ce7f8c69 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, struct virt_dma_desc *vdesc; enum dma_status status; unsigned long flags; - u32 residue; + u32 residue = 0; status = dma_cookie_status(c, cookie, state); if ((status == DMA_COMPLETE) || (!state)) @@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, spin_lock_irqsave(&chan->vchan.lock, flags); vdesc = vchan_find_desc(&chan->vchan, cookie); - if (cookie == chan->desc->vdesc.tx.cookie) { + if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) residue = stm32_dma_desc_residue(chan, chan->desc, chan->next_sg); - } else if (vdesc) { + else if (vdesc) residue = stm32_dma_desc_residue(chan, to_stm32_dma_desc(vdesc), 0); - } else { - residue = 0; - } - dma_set_residue(state, residue); spin_unlock_irqrestore(&chan->vchan.lock, flags); @@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, struct stm32_dma_chan *chan; struct dma_chan *c; - if (dma_spec->args_count < 3) + if (dma_spec->args_count < 4) return NULL; cfg.channel_id = dma_spec->args[0]; cfg.request_line = dma_spec->args[1]; cfg.stream_config = dma_spec->args[2]; - cfg.threshold = 0; + cfg.threshold = dma_spec->args[3]; if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) return NULL; - if (dma_spec->args_count > 3) - cfg.threshold = dma_spec->args[3]; - chan = &dmadev->chan[cfg.channel_id]; c = dma_get_slave_channel(&chan->vchan.chan); diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 3f24aeb48c0e..2403475a37cf 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev) match = of_match_node(ti_am335x_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); return -EINVAL; } @@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) match = of_match_node(ti_dra7_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); + of_node_put(dma_node); return -EINVAL; } diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 78298460d168..7c1e3a7b14e0 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id) dev_err(&edev->dev, "out of memory in extcon_set_state\n"); kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE); - return 0; + return -ENOMEM; } length = name_show(&edev->dev, NULL, prop_buf); diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index 520a40e5e0e4..6c7d60c239b5 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -71,8 +71,7 @@ void __init efi_fake_memmap(void) } /* allocate memory for new EFI memmap */ - new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map, - PAGE_SIZE); + new_memmap_phy = efi_memmap_alloc(new_nr_map); if (!new_memmap_phy) return; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 
b98824e3800a..0e2a96b12cb3 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle); unsigned long get_dram_base(efi_system_table_t *sys_table_arg); -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, - void *fdt, int new_fdt_size, char *cmdline_ptr, - u64 initrd_addr, u64 initrd_size, - efi_memory_desc_t *memory_map, - unsigned long map_size, unsigned long desc_size, - u32 desc_ver); - efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, void *handle, unsigned long *new_fdt_addr, diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index a6a93116a8f0..921dfa047202 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,13 +16,10 @@ #include "efistub.h" -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, - void *fdt, int new_fdt_size, char *cmdline_ptr, - u64 initrd_addr, u64 initrd_size, - efi_memory_desc_t *memory_map, - unsigned long map_size, unsigned long desc_size, - u32 desc_ver) +static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, + unsigned long orig_fdt_size, + void *fdt, int new_fdt_size, char *cmdline_ptr, + u64 initrd_addr, u64 initrd_size) { int node, num_rsv; int status; @@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (status) goto fdt_set_fail; - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map); + fdt_val64 = U64_MAX; /* placeholder */ status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", &fdt_val64, sizeof(fdt_val64)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(map_size); + fdt_val32 = U32_MAX; /* placeholder */ status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", &fdt_val32, sizeof(fdt_val32)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(desc_size); status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", &fdt_val32, sizeof(fdt_val32)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(desc_ver); status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", &fdt_val32, sizeof(fdt_val32)); if (status) @@ -148,6 +143,43 @@ fdt_set_fail: return EFI_LOAD_ERROR; } +static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) +{ + int node = fdt_path_offset(fdt, "/chosen"); + u64 fdt_val64; + u32 fdt_val32; + int err; + + if (node < 0) + return EFI_LOAD_ERROR; + + fdt_val64 = cpu_to_fdt64((unsigned long)*map->map); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start", + &fdt_val64, sizeof(fdt_val64)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->map_size); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->desc_size); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->desc_ver); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + return EFI_SUCCESS; +} + #ifndef EFI_FDT_ALIGN #define EFI_FDT_ALIGN EFI_PAGE_SIZE #endif @@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, goto fail; } - /* - * Now that we have done our final memory allocation (and 
free) - * we can get the memory map key needed for - * exit_boot_services(). - */ - status = efi_get_memory_map(sys_table, &map); - if (status != EFI_SUCCESS) - goto fail_free_new_fdt; - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, new_fdt_size, - cmdline_ptr, initrd_addr, initrd_size, - memory_map, map_size, desc_size, desc_ver); + cmdline_ptr, initrd_addr, initrd_size); /* Succeeding the first time is the expected case. */ if (status == EFI_SUCCESS) @@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, /* * We need to allocate more space for the new * device tree, so free existing buffer that is - * too small. Also free memory map, as we will need - * to get new one that reflects the free/alloc we do - * on the device tree buffer. + * too small. */ efi_free(sys_table, new_fdt_size, *new_fdt_addr); - sys_table->boottime->free_pool(memory_map); new_fdt_size += EFI_PAGE_SIZE; } else { pr_efi_err(sys_table, "Unable to construct new device tree.\n"); - goto fail_free_mmap; + goto fail_free_new_fdt; } } - sys_table->boottime->free_pool(memory_map); priv.runtime_map = runtime_map; priv.runtime_entry_count = &runtime_entry_count; status = efi_exit_boot_services(sys_table, handle, &map, &priv, @@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; + status = update_fdt_memmap((void *)*new_fdt_addr, &map); + if (status != EFI_SUCCESS) { + /* + * The kernel won't get far without the memory map, but + * may still be able to print something meaningful so + * return success here. + */ + return EFI_SUCCESS; + } + /* Install the new virtual address map */ svam = sys_table->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, @@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, pr_efi_err(sys_table, "Exit boot services failed.\n"); -fail_free_mmap: - sys_table->boottime->free_pool(memory_map); - fail_free_new_fdt: efi_free(sys_table, new_fdt_size, *new_fdt_addr); diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index f03ddecd232b..78686443cb37 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -9,6 +9,44 @@ #include <linux/efi.h> #include <linux/io.h> #include <asm/early_ioremap.h> +#include <linux/memblock.h> +#include <linux/slab.h> + +static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size) +{ + return memblock_alloc(size, 0); +} + +static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) +{ + unsigned int order = get_order(size); + struct page *p = alloc_pages(GFP_KERNEL, order); + + if (!p) + return 0; + + return PFN_PHYS(page_to_pfn(p)); +} + +/** + * efi_memmap_alloc - Allocate memory for the EFI memory map + * @num_entries: Number of entries in the allocated map. + * + * Depending on whether mm_init() has already been invoked or not, + * either memblock or "normal" page allocation is used. + * + * Returns the physical address of the allocated memory map on + * success, zero on failure. 
+ */ +phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) +{ + unsigned long size = num_entries * efi.memmap.desc_size; + + if (slab_is_available()) + return __efi_memmap_alloc_late(size); + + return __efi_memmap_alloc_early(size); +} /** * __efi_memmap_init - Common code for mapping the EFI memory map diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index 1e8fde8cb803..2292742eac8f 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) return 0; } -static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) +static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) { struct irq_chip_generic *gc; struct irq_chip_type *ct; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f4c26c7826cd..a07ae9e37930 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip) /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); + gpiochip_free_hogs(chip); /* Numb the device, cancelling all outstanding operations */ gdev->chip = NULL; gpiochip_irqchip_remove(chip); acpi_gpiochip_remove(chip); gpiochip_remove_pin_ranges(chip); - gpiochip_free_hogs(chip); of_gpiochip_remove(chip); /* * We accept no more calls into the driver from this point, so @@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) } /** - * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip + * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip * @gpiochip: the gpiochip to add the irqchip to * @irqchip: the irqchip to add to the gpiochip * @first_irq: if not dynamically assigned, the base (first) IRQ to @@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * the pins on the gpiochip can generate a unique IRQ. Everything else * need to be open coded. */ -int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, - struct irq_chip *irqchip, - unsigned int first_irq, - irq_flow_handler_t handler, - unsigned int type, - bool nested, - struct lock_class_key *lock_key) +int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, + struct irq_chip *irqchip, + unsigned int first_irq, + irq_flow_handler_t handler, + unsigned int type, + bool nested, + struct lock_class_key *lock_key) { struct device_node *of_node; bool irq_base_set = false; @@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, return 0; } -EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add); +EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key); #else /* CONFIG_GPIOLIB_IRQCHIP */ diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 6f3f9e68c218..90bc65d07a35 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -138,7 +138,7 @@ config DRM_KMS_CMA_HELPER config DRM_VM bool - depends on DRM + depends on DRM && MMU source "drivers/gpu/drm/i2c/Kconfig" @@ -267,7 +267,7 @@ source "drivers/gpu/drm/meson/Kconfig" menuconfig DRM_LEGACY bool "Enable legacy drivers (DANGEROUS)" - depends on DRM + depends on DRM && MMU select DRM_VM help Enable legacy DRI1 drivers. 
Those drivers expose unsafe and dangerous diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 41bd2bf28f4c..2814aad81752 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ - amdgpu_gtt_mgr.o amdgpu_vram_mgr.o + amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ @@ -34,7 +34,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o amdgpu-y += \ - vi.o + vi.o mxgpu_vi.o # add GMC block amdgpu-y += \ @@ -52,8 +52,7 @@ amdgpu-y += \ # add SMC block amdgpu-y += \ amdgpu_dpm.o \ - amdgpu_powerplay.o \ - cz_smc.o cz_dpm.o + amdgpu_powerplay.o # add DCE block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f4f371fbce16..94a64e3bc682 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -91,7 +91,6 @@ extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; -extern int amdgpu_powerplay; extern int amdgpu_no_evict; extern int amdgpu_direct_gma_size; extern unsigned amdgpu_pcie_gen_cap; @@ -184,12 +183,18 @@ enum amdgpu_thermal_irq { AMDGPU_THERMAL_IRQ_LAST }; +enum amdgpu_kiq_irq { + AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, + AMDGPU_CP_KIQ_IRQ_LAST +}; + int amdgpu_set_clockgating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_clockgating_state state); int amdgpu_set_powergating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_powergating_state state); +void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags); int amdgpu_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); bool amdgpu_is_idle(struct amdgpu_device *adev, @@ -352,7 +357,7 @@ struct amdgpu_bo_va_mapping { struct list_head list; struct interval_tree_node it; uint64_t offset; - uint32_t flags; + uint64_t flags; }; /* bo virtual addresses in a specific vm */ @@ -776,14 +781,20 @@ struct amdgpu_mec { u32 num_queue; }; +struct amdgpu_kiq { + u64 eop_gpu_addr; + struct amdgpu_bo *eop_obj; + struct amdgpu_ring ring; + struct amdgpu_irq_src irq; +}; + /* * GPU scratch registers structures, functions & helpers */ struct amdgpu_scratch { unsigned num_reg; uint32_t reg_base; - bool free[32]; - uint32_t reg[32]; + uint32_t free_mask; }; /* @@ -851,6 +862,7 @@ struct amdgpu_gfx { struct amdgpu_gca_config config; struct amdgpu_rlc rlc; struct amdgpu_mec mec; + struct amdgpu_kiq kiq; struct amdgpu_scratch scratch; const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; @@ -894,8 +906,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct dma_fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ib, struct dma_fence *last_vm_update, - struct amdgpu_job *job, struct dma_fence **f); + struct amdgpu_ib *ibs, struct amdgpu_job *job, + struct dma_fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void 
amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); @@ -938,6 +950,7 @@ struct amdgpu_cs_parser { #define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */ #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */ #define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */ +#define AMDGPU_VM_DOMAIN (1 << 3) /* bit set means in virtual memory context */ struct amdgpu_job { struct amd_sched_job base; @@ -1024,6 +1037,7 @@ struct amdgpu_uvd { bool use_ctx_buf; struct amd_sched_entity entity; uint32_t srbm_soft_reset; + bool is_powergated; }; /* @@ -1052,6 +1066,7 @@ struct amdgpu_vce { struct amd_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; + bool is_powergated; }; /* @@ -1133,7 +1148,6 @@ int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); #if defined(CONFIG_DEBUG_FS) int amdgpu_debugfs_init(struct drm_minor *minor); -void amdgpu_debugfs_cleanup(struct drm_minor *minor); #endif int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); @@ -1178,7 +1192,6 @@ struct amdgpu_asic_funcs { bool (*read_disabled_bios)(struct amdgpu_device *adev); bool (*read_bios_from_rom)(struct amdgpu_device *adev, u8 *bios, u32 length_bytes); - void (*detect_hw_virtualization) (struct amdgpu_device *adev); int (*read_register)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); @@ -1333,7 +1346,6 @@ struct amdgpu_device { /* BIOS */ uint8_t *bios; uint32_t bios_size; - bool is_atom_bios; struct amdgpu_bo *stollen_vga_memory; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -1463,7 +1475,7 @@ struct amdgpu_device { /* amdkfd interface */ struct kfd_dev *kfd; - struct amdgpu_virtualization virtualization; + struct amdgpu_virt virt; /* link all shadow bo */ struct list_head shadow_list; @@ -1576,6 +1588,37 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) ring->count_dw--; } +static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw) +{ + unsigned occupied, chunk1, chunk2; + void *dst; + + if (ring->count_dw < count_dw) { + DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); + } else { + occupied = ring->wptr & ring->ptr_mask; + dst = (void *)&ring->ring[occupied]; + chunk1 = ring->ptr_mask + 1 - occupied; + chunk1 = (chunk1 >= count_dw) ? 
count_dw: chunk1; + chunk2 = count_dw - chunk1; + chunk1 <<= 2; + chunk2 <<= 2; + + if (chunk1) + memcpy(dst, src, chunk1); + + if (chunk2) { + src += chunk1; + dst = (void *)ring->ring; + memcpy(dst, src, chunk2); + } + + ring->wptr += count_dw; + ring->wptr &= ring->ptr_mask; + ring->count_dw -= count_dw; + } +} + static inline struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) { @@ -1605,7 +1648,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) -#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) @@ -1627,6 +1669,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) +#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) +#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 5796539a0bcb..ef79551b4cb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -672,12 +672,10 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) && enc->enc_priv) { - if (adev->is_atom_bios) { - struct amdgpu_encoder_atom_dig *dig = enc->enc_priv; - if (dig->bl_dev) { - atif->encoder_for_bl = enc; - break; - } + struct amdgpu_encoder_atom_dig *dig = enc->enc_priv; + if (dig->bl_dev) { + atif->encoder_for_bl = enc; + break; } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 8ec1967a850b..d9def01f276e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -42,6 +42,51 @@ #define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA) #define AMD_VBIOS_LENGTH(p) ((p)[2] << 9) +/* Check if current bios is an ATOM BIOS. + * Return true if it is ATOM BIOS. Otherwise, return false. 
+ */ +static bool check_atom_bios(uint8_t *bios, size_t size) +{ + uint16_t tmp, bios_header_start; + + if (!bios || size < 0x49) { + DRM_INFO("vbios mem is null or mem size is wrong\n"); + return false; + } + + if (!AMD_IS_VALID_VBIOS(bios)) { + DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]); + return false; + } + + tmp = bios[0x18] | (bios[0x19] << 8); + if (bios[tmp + 0x14] != 0x0) { + DRM_INFO("Not an x86 BIOS ROM\n"); + return false; + } + + bios_header_start = bios[0x48] | (bios[0x49] << 8); + if (!bios_header_start) { + DRM_INFO("Can't locate bios header\n"); + return false; + } + + tmp = bios_header_start + 4; + if (size < tmp) { + DRM_INFO("BIOS header is broken\n"); + return false; + } + + if (!memcmp(bios + tmp, "ATOM", 4) || + !memcmp(bios + tmp, "MOTA", 4)) { + DRM_DEBUG("ATOMBIOS detected\n"); + return true; + } + + return false; +} + + /* If you boot an IGP board with a discrete card as the primary, * the IGP rom is not accessible via the rom bar as the IGP rom is * part of the system bios. On boot, the system bios puts a @@ -65,10 +110,6 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) return false; } - if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) { - iounmap(bios); - return false; - } adev->bios = kmalloc(size, GFP_KERNEL); if (!adev->bios) { iounmap(bios); @@ -77,12 +118,18 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) adev->bios_size = size; memcpy_fromio(adev->bios, bios, size); iounmap(bios); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + return true; } bool amdgpu_read_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios, val[2]; + uint8_t __iomem *bios; size_t size; adev->bios = NULL; @@ -92,13 +139,6 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) return false; } - val[0] = readb(&bios[0]); - val[1] = readb(&bios[1]); - - if (size == 0 || !AMD_IS_VALID_VBIOS(val)) { - pci_unmap_rom(adev->pdev, bios); - return false; - } adev->bios = kzalloc(size, GFP_KERNEL); if (adev->bios == NULL) { pci_unmap_rom(adev->pdev, bios); @@ -107,6 +147,12 @@ bool amdgpu_read_bios(struct amdgpu_device *adev) adev->bios_size = size; memcpy_fromio(adev->bios, bios, size); pci_unmap_rom(adev->pdev, bios); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); + return false; + } + return true; } @@ -140,7 +186,14 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) adev->bios_size = len; /* read complete BIOS */ - return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len); + amdgpu_asic_read_bios_from_rom(adev, adev->bios, len); + + if (!check_atom_bios(adev->bios, len)) { + kfree(adev->bios); + return false; + } + + return true; } static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) @@ -155,13 +208,17 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) return false; } - if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) { + adev->bios = kzalloc(size, GFP_KERNEL); + if (adev->bios == NULL) return false; - } - adev->bios = kmemdup(bios, size, GFP_KERNEL); - if (adev->bios == NULL) { + + memcpy_fromio(adev->bios, bios, size); + + if (!check_atom_bios(adev->bios, size)) { + kfree(adev->bios); return false; } + adev->bios_size = size; return true; @@ -273,7 +330,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev) break; } - if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) { + if (!check_atom_bios(adev->bios, size)) { kfree(adev->bios); return false; } @@ -298,53 +355,59 @@ static bool amdgpu_read_disabled_bios(struct 
amdgpu_device *adev) #ifdef CONFIG_ACPI static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) { - bool ret = false; struct acpi_table_header *hdr; acpi_size tbl_size; UEFI_ACPI_VFCT *vfct; - GOP_VBIOS_CONTENT *vbios; - VFCT_IMAGE_HEADER *vhdr; + unsigned offset; if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; tbl_size = hdr->length; if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); - goto out_unmap; + return false; } vfct = (UEFI_ACPI_VFCT *)hdr; - if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { - DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); - goto out_unmap; - } + offset = vfct->VBIOSImageOffset; - vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); - vhdr = &vbios->VbiosHeader; - DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", - vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, - vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); - - if (vhdr->PCIBus != adev->pdev->bus->number || - vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) || - vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) || - vhdr->VendorID != adev->pdev->vendor || - vhdr->DeviceID != adev->pdev->device) { - DRM_INFO("ACPI VFCT table is not for this card\n"); - goto out_unmap; - } + while (offset < tbl_size) { + GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset); + VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader; - if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { - DRM_ERROR("ACPI VFCT image truncated\n"); - goto out_unmap; - } + offset += sizeof(VFCT_IMAGE_HEADER); + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image header truncated\n"); + return false; + } + + offset += vhdr->ImageLength; + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + return false; + } - adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); - adev->bios_size = vhdr->ImageLength; - ret = !!adev->bios; + if (vhdr->ImageLength && + vhdr->PCIBus == adev->pdev->bus->number && + vhdr->PCIDevice == PCI_SLOT(adev->pdev->devfn) && + vhdr->PCIFunction == PCI_FUNC(adev->pdev->devfn) && + vhdr->VendorID == adev->pdev->vendor && + vhdr->DeviceID == adev->pdev->device) { + adev->bios = kmemdup(&vbios->VbiosContent, + vhdr->ImageLength, + GFP_KERNEL); + + if (!check_atom_bios(adev->bios, vhdr->ImageLength)) { + kfree(adev->bios); + return false; + } + adev->bios_size = vhdr->ImageLength; + return true; + } + } -out_unmap: - return ret; + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + return false; } #else static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) @@ -355,57 +418,27 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev) bool amdgpu_get_bios(struct amdgpu_device *adev) { - bool r; - uint16_t tmp, bios_header_start; + if (amdgpu_atrm_get_bios(adev)) + return true; - r = amdgpu_atrm_get_bios(adev); - if (!r) - r = amdgpu_acpi_vfct_bios(adev); - if (!r) - r = igp_read_bios_from_vram(adev); - if (!r) - r = amdgpu_read_bios(adev); - if (!r) { - r = amdgpu_read_bios_from_rom(adev); - } - if (!r) { - r = amdgpu_read_disabled_bios(adev); - } - if (!r) { - r = amdgpu_read_platform_bios(adev); - } - if (!r || adev->bios == NULL) { - DRM_ERROR("Unable to locate a BIOS ROM\n"); - adev->bios = NULL; - return false; - } - if (!AMD_IS_VALID_VBIOS(adev->bios)) { - printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]); - goto 
free_bios; - } + if (amdgpu_acpi_vfct_bios(adev)) + return true; - tmp = RBIOS16(0x18); - if (RBIOS8(tmp + 0x14) != 0x0) { - DRM_INFO("Not an x86 BIOS ROM, not using.\n"); - goto free_bios; - } + if (igp_read_bios_from_vram(adev)) + return true; - bios_header_start = RBIOS16(0x48); - if (!bios_header_start) { - goto free_bios; - } - tmp = bios_header_start + 4; - if (!memcmp(adev->bios + tmp, "ATOM", 4) || - !memcmp(adev->bios + tmp, "MOTA", 4)) { - adev->is_atom_bios = true; - } else { - adev->is_atom_bios = false; - } + if (amdgpu_read_bios(adev)) + return true; - DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM"); - return true; -free_bios: - kfree(adev->bios); - adev->bios = NULL; + if (amdgpu_read_bios_from_rom(adev)) + return true; + + if (amdgpu_read_disabled_bios(adev)) + return true; + + if (amdgpu_read_platform_bios(adev)) + return true; + + DRM_ERROR("Unable to locate a BIOS ROM\n"); return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 9ada56c16a58..a5df1ef306d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -713,6 +713,7 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode CGS_FUNC_ADEV; if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { release_firmware(adev->pm.fw); + adev->pm.fw = NULL; return 0; } /* cannot release other firmware because they are not created by cgs */ @@ -762,6 +763,23 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, return fw_version; } +static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device, + bool en) +{ + CGS_FUNC_ADEV; + + if (adev->gfx.rlc.funcs->enter_safe_mode == NULL || + adev->gfx.rlc.funcs->exit_safe_mode == NULL) + return 0; + + if (en) + adev->gfx.rlc.funcs->enter_safe_mode(adev); + else + adev->gfx.rlc.funcs->exit_safe_mode(adev); + + return 0; +} + static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -808,6 +826,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, const uint8_t *src; const struct smc_firmware_header_v1_0 *hdr; + if (CGS_UCODE_ID_SMU_SK == type) + amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU); + if (!adev->pm.fw) { switch (adev->asic_type) { case CHIP_TOPAZ: @@ -840,6 +861,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, else if (type == CGS_UCODE_ID_SMU_SK) strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); break; + case CHIP_POLARIS12: + strcpy(fw_name, "amdgpu/polaris12_smc.bin"); + break; default: DRM_ERROR("SMC firmware not supported\n"); return -EINVAL; @@ -1197,51 +1221,52 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, } static const struct cgs_ops amdgpu_cgs_ops = { - amdgpu_cgs_gpu_mem_info, - amdgpu_cgs_gmap_kmem, - amdgpu_cgs_gunmap_kmem, - amdgpu_cgs_alloc_gpu_mem, - amdgpu_cgs_free_gpu_mem, - amdgpu_cgs_gmap_gpu_mem, - amdgpu_cgs_gunmap_gpu_mem, - amdgpu_cgs_kmap_gpu_mem, - amdgpu_cgs_kunmap_gpu_mem, - amdgpu_cgs_read_register, - amdgpu_cgs_write_register, - amdgpu_cgs_read_ind_register, - amdgpu_cgs_write_ind_register, - amdgpu_cgs_read_pci_config_byte, - amdgpu_cgs_read_pci_config_word, - amdgpu_cgs_read_pci_config_dword, - amdgpu_cgs_write_pci_config_byte, - amdgpu_cgs_write_pci_config_word, - amdgpu_cgs_write_pci_config_dword, - amdgpu_cgs_get_pci_resource, - amdgpu_cgs_atom_get_data_table, - amdgpu_cgs_atom_get_cmd_table_revs, - 
amdgpu_cgs_atom_exec_cmd_table, - amdgpu_cgs_create_pm_request, - amdgpu_cgs_destroy_pm_request, - amdgpu_cgs_set_pm_request, - amdgpu_cgs_pm_request_clock, - amdgpu_cgs_pm_request_engine, - amdgpu_cgs_pm_query_clock_limits, - amdgpu_cgs_set_camera_voltages, - amdgpu_cgs_get_firmware_info, - amdgpu_cgs_rel_firmware, - amdgpu_cgs_set_powergating_state, - amdgpu_cgs_set_clockgating_state, - amdgpu_cgs_get_active_displays_info, - amdgpu_cgs_notify_dpm_enabled, - amdgpu_cgs_call_acpi_method, - amdgpu_cgs_query_system_info, - amdgpu_cgs_is_virtualization_enabled + .gpu_mem_info = amdgpu_cgs_gpu_mem_info, + .gmap_kmem = amdgpu_cgs_gmap_kmem, + .gunmap_kmem = amdgpu_cgs_gunmap_kmem, + .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem, + .free_gpu_mem = amdgpu_cgs_free_gpu_mem, + .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem, + .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem, + .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem, + .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem, + .read_register = amdgpu_cgs_read_register, + .write_register = amdgpu_cgs_write_register, + .read_ind_register = amdgpu_cgs_read_ind_register, + .write_ind_register = amdgpu_cgs_write_ind_register, + .read_pci_config_byte = amdgpu_cgs_read_pci_config_byte, + .read_pci_config_word = amdgpu_cgs_read_pci_config_word, + .read_pci_config_dword = amdgpu_cgs_read_pci_config_dword, + .write_pci_config_byte = amdgpu_cgs_write_pci_config_byte, + .write_pci_config_word = amdgpu_cgs_write_pci_config_word, + .write_pci_config_dword = amdgpu_cgs_write_pci_config_dword, + .get_pci_resource = amdgpu_cgs_get_pci_resource, + .atom_get_data_table = amdgpu_cgs_atom_get_data_table, + .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs, + .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table, + .create_pm_request = amdgpu_cgs_create_pm_request, + .destroy_pm_request = amdgpu_cgs_destroy_pm_request, + .set_pm_request = amdgpu_cgs_set_pm_request, + .pm_request_clock = amdgpu_cgs_pm_request_clock, + .pm_request_engine = amdgpu_cgs_pm_request_engine, + .pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits, + .set_camera_voltages = amdgpu_cgs_set_camera_voltages, + .get_firmware_info = amdgpu_cgs_get_firmware_info, + .rel_firmware = amdgpu_cgs_rel_firmware, + .set_powergating_state = amdgpu_cgs_set_powergating_state, + .set_clockgating_state = amdgpu_cgs_set_clockgating_state, + .get_active_displays_info = amdgpu_cgs_get_active_displays_info, + .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled, + .call_acpi_method = amdgpu_cgs_call_acpi_method, + .query_system_info = amdgpu_cgs_query_system_info, + .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, + .enter_safe_mode = amdgpu_cgs_enter_safe_mode, }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_add_irq_source, - amdgpu_cgs_irq_get, - amdgpu_cgs_irq_put + .add_irq_source = amdgpu_cgs_add_irq_source, + .irq_get = amdgpu_cgs_irq_get, + .irq_put = amdgpu_cgs_irq_put }; struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 29d6d84d1c28..cf2e8c4e9b8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -75,10 +75,10 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, *out_ring = &adev->uvd.ring; break; case AMDGPU_HW_IP_VCE: - if (ring < 2){ + if (ring < adev->vce.num_rings){ *out_ring = &adev->vce.ring[ring]; } else { - DRM_ERROR("only two VCE rings are supported\n"); + DRM_ERROR("only %d VCE rings are 
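The cgs_ops conversion in this hunk, from positional to designated initializers, is mechanical, but it decouples the table from the member order of struct cgs_ops and zero-fills anything left unnamed, which is what lets the new .enter_safe_mode hook be appended safely. A toy illustration of the difference:

#include <stdio.h>

struct ops {
	int  (*open)(void);
	int  (*close)(void);
	void (*debug)(void);	/* member added later */
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

/* With positional initializers, { my_open, my_close } would have to be
 * revisited whenever struct ops grows or is reordered; the designated
 * form binds by name and leaves .debug zero-initialized (NULL). */
static const struct ops my_ops = {
	.open  = my_open,
	.close = my_close,
};

int main(void)
{
	printf("debug hook set: %s\n", my_ops.debug ? "yes" : "no");
	return my_ops.open() | my_ops.close();
}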
supported\n", adev->vce.num_rings); return -EINVAL; } break; @@ -771,6 +771,20 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (r) return r; + if (amdgpu_sriov_vf(adev)) { + struct dma_fence *f; + bo_va = vm->csa_bo_va; + BUG_ON(!bo_va); + r = amdgpu_vm_bo_update(adev, bo_va, false); + if (r) + return r; + + f = bo_va->last_pt_update; + r = amdgpu_sync_fence(adev, &p->job->sync, f); + if (r) + return r; + } + if (p->bo_list) { for (i = 0; i < p->bo_list->num_entries; i++) { struct dma_fence *f; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 60bd4afe45c8..944ba0d3874a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = { "STONEY", "POLARIS10", "POLARIS11", + "POLARIS12", "LAST", }; @@ -93,6 +94,11 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; + if (amdgpu_sriov_runtime(adev)) { + BUG_ON(in_interrupt()); + return amdgpu_virt_kiq_rreg(adev, reg); + } + if ((reg * 4) < adev->rmmio_size && !always_indirect) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -112,6 +118,11 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); + if (amdgpu_sriov_runtime(adev)) { + BUG_ON(in_interrupt()); + return amdgpu_virt_kiq_wreg(adev, reg, v); + } + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -884,7 +895,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) atom_card_info->ioreg_read = cail_ioreg_read; atom_card_info->ioreg_write = cail_ioreg_write; } else { - DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); + DRM_INFO("PCI I/O BAR is not found. 
Using MMIO to access ATOM BIOS\n"); atom_card_info->ioreg_read = cail_reg_read; atom_card_info->ioreg_write = cail_reg_write; } @@ -1130,6 +1141,18 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, return r; } +void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (adev->ip_blocks[i].version->funcs->get_clockgating_state) + adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); + } +} + int amdgpu_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -1234,7 +1257,8 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) pciaddstr_tmp = pciaddstr; while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { pciaddname = strsep(&pciaddname_tmp, ","); - if (!strcmp(pci_address_name, pciaddname)) { + if (!strcmp("all", pciaddname) + || !strcmp(pci_address_name, pciaddname)) { long num_crtc; int res = -1; @@ -1277,6 +1301,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) case CHIP_FIJI: case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: case CHIP_CARRIZO: case CHIP_STONEY: if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) @@ -1321,6 +1346,12 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return -EINVAL; } + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_request_full_gpu(adev, true); + if (r) + return r; + } + for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d\n", i); @@ -1381,6 +1412,15 @@ static int amdgpu_init(struct amdgpu_device *adev) return r; } adev->ip_blocks[i].status.hw = true; + + /* right after GMC hw init, we create CSA */ + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_allocate_static_csa(adev); + if (r) { + DRM_ERROR("allocate CSA failed %d\n", r); + return r; + } + } } } @@ -1514,6 +1554,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.late_initialized = false; } + if (amdgpu_sriov_vf(adev)) { + amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); + amdgpu_virt_release_full_gpu(adev, false); + } + return 0; } @@ -1521,6 +1566,9 @@ int amdgpu_suspend(struct amdgpu_device *adev) { int i, r; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + /* ungate SMC block first */ r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, AMD_CG_STATE_UNGATE); @@ -1549,6 +1597,9 @@ int amdgpu_suspend(struct amdgpu_device *adev) } } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, false); + return 0; } @@ -1573,7 +1624,7 @@ static int amdgpu_resume(struct amdgpu_device *adev) static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { if (amdgpu_atombios_has_gpu_virtualization_table(adev)) - adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; + adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } /** @@ -1603,7 +1654,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->pdev = pdev; adev->flags = flags; adev->asic_type = flags & AMD_ASIC_MASK; - adev->is_atom_bios = false; adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; adev->mc.gtt_size = 512 * 1024 * 1024; adev->accel_working = false; @@ -1693,7 +1743,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } if (adev->rio_mem == NULL) - DRM_ERROR("Unable to find PCI I/O BAR\n"); + DRM_INFO("PCI I/O BAR is not found.\n"); /* early init functions */ r = 
amdgpu_early_init(adev); @@ -1718,12 +1768,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = -EINVAL; goto failed; } - /* Must be an ATOMBIOS */ - if (!adev->is_atom_bios) { - dev_err(adev->dev, "Expecting atombios for GPU\n"); - r = -EINVAL; - goto failed; - } + r = amdgpu_atombios_init(adev); if (r) { dev_err(adev->dev, "amdgpu_atombios_init failed\n"); @@ -1850,8 +1895,6 @@ failed: return r; } -static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); - /** * amdgpu_device_fini - tear down the driver * @@ -1891,7 +1934,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_BONAIRE) amdgpu_doorbell_fini(adev); amdgpu_debugfs_regs_cleanup(adev); - amdgpu_debugfs_remove_files(adev); } @@ -2250,6 +2292,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) int resched; bool need_full_reset; + if (amdgpu_sriov_vf(adev)) + return 0; + if (!amdgpu_check_soft_reset(adev)) { DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); return 0; @@ -2505,19 +2550,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, return 0; } -static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < adev->debugfs_count; i++) { - drm_debugfs_remove_files(adev->debugfs[i].files, - adev->debugfs[i].num_files, - adev->ddev->primary); - } -#endif -} - #if defined(CONFIG_DEBUG_FS) static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, @@ -2851,7 +2883,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 2; + config[no_regs++] = 3; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = adev->gfx.config.max_cu_per_sh; @@ -2885,6 +2917,12 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->family; config[no_regs++] = adev->external_rev_id; + /* rev==3 */ + config[no_regs++] = adev->pdev->device; + config[no_regs++] = adev->pdev->revision; + config[no_regs++] = adev->pdev->subsystem_device; + config[no_regs++] = adev->pdev->subsystem_vendor; + while (size && (*pos < no_regs * 4)) { uint32_t value; @@ -3151,10 +3189,6 @@ int amdgpu_debugfs_init(struct drm_minor *minor) { return 0; } - -void amdgpu_debugfs_cleanup(struct drm_minor *minor) -{ -} #else static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index d2036df145b3..39fc388f222a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -138,10 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) kfree(work); } -int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags, uint32_t target) + +static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work) +{ + int i; + + amdgpu_bo_unref(&work->old_abo); + dma_fence_put(work->excl); + for (i = 0; i < work->shared_count; ++i) + dma_fence_put(work->shared[i]); + kfree(work->shared); + kfree(work); +} + +static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work, + struct amdgpu_bo *new_abo) +{ + amdgpu_bo_unreserve(new_abo); + amdgpu_flip_work_cleanup(work); +} + +static void amdgpu_flip_cleanup_unpin(struct 
amdgpu_flip_work *work, + struct amdgpu_bo *new_abo) +{ + if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) + DRM_ERROR("failed to unpin new abo in error path\n"); + amdgpu_flip_cleanup_unreserve(work, new_abo); +} + +void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work, + struct amdgpu_bo *new_abo) +{ + if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) { + DRM_ERROR("failed to reserve new abo in error path\n"); + amdgpu_flip_work_cleanup(work); + return; + } + amdgpu_flip_cleanup_unpin(work, new_abo); +} + +int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target, + struct amdgpu_flip_work **work_p, + struct amdgpu_bo **new_abo_p) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; @@ -154,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, unsigned long flags; u64 tiling_flags; u64 base; - int i, r; + int r; work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) @@ -189,7 +231,6 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base); if (unlikely(r != 0)) { - r = -EINVAL; DRM_ERROR("failed to pin new abo buffer before flip\n"); goto unreserve; } @@ -216,41 +257,79 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; goto pflip_cleanup; + } + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + *work_p = work; + *new_abo_p = new_abo; + + return 0; + +pflip_cleanup: + amdgpu_crtc_cleanup_flip_ctx(work, new_abo); + return r; +unpin: + amdgpu_flip_cleanup_unpin(work, new_abo); + return r; + +unreserve: + amdgpu_flip_cleanup_unreserve(work, new_abo); + return r; + +cleanup: + amdgpu_flip_work_cleanup(work); + return r; + +} + +void amdgpu_crtc_submit_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct amdgpu_flip_work *work, + struct amdgpu_bo *new_abo) +{ + unsigned long flags; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; amdgpu_crtc->pflip_works = work; - - DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n", - amdgpu_crtc->crtc_id, amdgpu_crtc, work); /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + DRM_DEBUG_DRIVER( + "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, work); + amdgpu_flip_work_func(&work->flip_work.work); - return 0; +} -pflip_cleanup: - if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) { - DRM_ERROR("failed to reserve new abo in error path\n"); - goto cleanup; - } -unpin: - if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { - DRM_ERROR("failed to unpin new abo in error path\n"); - } -unreserve: - amdgpu_bo_unreserve(new_abo); +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target) +{ + struct amdgpu_bo *new_abo; + struct amdgpu_flip_work *work; + int r; -cleanup: - amdgpu_bo_unref(&work->old_abo); - dma_fence_put(work->excl); - for (i = 0; i < work->shared_count; ++i) - dma_fence_put(work->shared[i]); - kfree(work->shared); - kfree(work); + r = amdgpu_crtc_prepare_flip(crtc, + fb, + event, + page_flip_flags, + target, + &work, + &new_abo); + if (r) + return r; - return r; + 
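The flip rework above replaces one goto ladder with nested cleanup helpers, each of which undoes its own step and then delegates to the helper one layer below, so amdgpu_crtc_prepare_flip() and the error paths of submit can share the same unwind code. The layering in miniature, with a hypothetical flip context:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical acquisition chain mirroring the flip path:
 * allocate work -> reserve BO -> pin BO -> claim the vblank slot. */
struct flip { int *work; bool reserved; bool pinned; };

static void cleanup_work(struct flip *f)
{
	free(f->work);
	f->work = NULL;
}

static void cleanup_unreserve(struct flip *f)
{
	f->reserved = false;	/* drop the reservation... */
	cleanup_work(f);	/* ...then everything below it */
}

static void cleanup_unpin(struct flip *f)
{
	f->pinned = false;	/* undo the pin... */
	cleanup_unreserve(f);	/* ...then fall through the layers */
}

static int prepare(struct flip *f, bool vblank_busy)
{
	f->work = malloc(sizeof(*f->work));
	if (!f->work)
		return -1;
	f->reserved = true;
	f->pinned = true;
	if (vblank_busy) {
		cleanup_unpin(f);	/* deepest failure unwinds all layers */
		return -1;
	}
	return 0;	/* success: ownership passes to submit */
}

int main(void)
{
	struct flip f = { 0 };

	printf("busy path: %d\n", prepare(&f, true));	/* unwinds itself */
	printf("clean path: %d\n", prepare(&f, false));
	cleanup_unpin(&f);	/* what the submit/finish path ends with */
	return 0;
}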
amdgpu_crtc_submit_flip(crtc, fb, work, new_abo); + + return 0; } int amdgpu_crtc_set_config(struct drm_mode_set *set) @@ -582,12 +661,10 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev) { int sz; - if (adev->is_atom_bios) { - adev->mode_info.coherent_mode_property = - drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1); - if (!adev->mode_info.coherent_mode_property) - return -ENOMEM; - } + adev->mode_info.coherent_mode_property = + drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1); + if (!adev->mode_info.coherent_mode_property) + return -ENOMEM; adev->mode_info.load_detect_property = drm_property_create_range(adev->ddev, 0, "load detection", 0, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 955d6f21e2b3..fa2b55681422 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -241,13 +241,6 @@ enum amdgpu_pcie_gen { AMDGPU_PCIE_GEN_INVALID = 0xffff }; -enum amdgpu_dpm_forced_level { - AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, - AMDGPU_DPM_FORCED_LEVEL_LOW = 1, - AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, - AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, -}; - struct amdgpu_dpm_funcs { int (*get_temperature)(struct amdgpu_device *adev); int (*pre_set_power_state)(struct amdgpu_device *adev); @@ -258,7 +251,7 @@ struct amdgpu_dpm_funcs { u32 (*get_mclk)(struct amdgpu_device *adev, bool low); void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); - int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); + int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level); bool (*vblank_too_short)(struct amdgpu_device *adev); void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); void (*powergate_vce)(struct amdgpu_device *adev, bool gate); @@ -353,9 +346,6 @@ struct amdgpu_dpm_funcs { #define amdgpu_dpm_get_current_power_state(adev) \ (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) -#define amdgpu_dpm_get_performance_level(adev) \ - (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) - #define amdgpu_dpm_get_pp_num_states(adev, data) \ (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) @@ -393,6 +383,11 @@ struct amdgpu_dpm_funcs { (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \ (adev)->pm.funcs->get_vce_clock_state((adev), (i))) +#define amdgpu_dpm_get_performance_level(adev) \ + ((adev)->pp_enabled ? 
\ + (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \ + (adev)->pm.dpm.forced_level) + struct amdgpu_dpm { struct amdgpu_ps *ps; /* number of valid power states */ @@ -440,7 +435,7 @@ struct amdgpu_dpm { /* thermal handling */ struct amdgpu_dpm_thermal thermal; /* forced levels */ - enum amdgpu_dpm_forced_level forced_level; + enum amd_dpm_forced_level forced_level; }; struct amdgpu_pm { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8cb937b2bfcc..75fc376ba735 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -90,7 +90,6 @@ int amdgpu_vram_page_split = 1024; int amdgpu_exp_hw_support = 0; int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; -int amdgpu_powerplay = -1; int amdgpu_no_evict = 0; int amdgpu_direct_gma_size = 0; unsigned amdgpu_pcie_gen_cap = 0; @@ -179,9 +178,6 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444); MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); -MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); -module_param_named(powerplay, amdgpu_powerplay, int, 0444); - MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444); @@ -418,6 +414,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + /* Polaris12 */ + {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0, 0, 0} }; @@ -679,7 +682,6 @@ static struct drm_driver kms_driver = { DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, - .dev_priv_size = 0, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .preclose = amdgpu_driver_preclose_kms, @@ -694,7 +696,6 @@ static struct drm_driver kms_driver = { .get_scanout_position = amdgpu_get_crtc_scanoutpos, #if defined(CONFIG_DEBUG_FS) .debugfs_init = amdgpu_debugfs_init, - .debugfs_cleanup = amdgpu_debugfs_cleanup, #endif .irq_preinstall = amdgpu_irq_preinstall, .irq_postinstall = amdgpu_irq_postinstall, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index cd62f6ffde2a..9bd1b4eae32e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -471,12 +471,15 @@ out: static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) { - unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - /* if anything is swapped out don't swap it in here, just abort and wait for the next CS */ + if (!amdgpu_bo_gpu_accessible(bo)) + return -ERESTARTSYS; + + if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) + return -ERESTARTSYS; - return domain == AMDGPU_GEM_DOMAIN_CPU ? 
-ERESTARTSYS : 0; + return 0; } /** @@ -496,7 +499,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_bo_list_entry vm_pd; struct ww_acquire_ctx ticket; struct list_head list, duplicates; - unsigned domain; int r; INIT_LIST_HEAD(&list); @@ -514,12 +516,18 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, goto error_print; list_for_each_entry(entry, &list, head) { - domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); + struct amdgpu_bo *bo = + container_of(entry->bo, struct amdgpu_bo, tbo); + /* if anything is swapped out don't swap it in here, just abort and wait for the next CS */ - if (domain == AMDGPU_GEM_DOMAIN_CPU) + if (!amdgpu_bo_gpu_accessible(bo)) + goto error_unreserve; + + if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) goto error_unreserve; } + r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 01a42b6a69a4..19943356cca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -42,12 +42,12 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) { int i; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - if (adev->gfx.scratch.free[i]) { - adev->gfx.scratch.free[i] = false; - *reg = adev->gfx.scratch.reg[i]; - return 0; - } + i = ffs(adev->gfx.scratch.free_mask); + if (i != 0 && i <= adev->gfx.scratch.num_reg) { + i--; + adev->gfx.scratch.free_mask &= ~(1u << i); + *reg = adev->gfx.scratch.reg_base + i; + return 0; } return -EINVAL; } @@ -62,14 +62,7 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) */ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) { - int i; - - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - if (adev->gfx.scratch.reg[i] == reg) { - adev->gfx.scratch.free[i] = true; - return; - } - } + adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index c6c125d31161..e4eb6dd3798a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -243,9 +243,9 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = { - amdgpu_gtt_mgr_init, - amdgpu_gtt_mgr_fini, - amdgpu_gtt_mgr_new, - amdgpu_gtt_mgr_del, - amdgpu_gtt_mgr_debug + .init = amdgpu_gtt_mgr_init, + .takedown = amdgpu_gtt_mgr_fini, + .get_node = amdgpu_gtt_mgr_new, + .put_node = amdgpu_gtt_mgr_del, + .debug = amdgpu_gtt_mgr_debug }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 91d367399956..f2739995c335 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -231,8 +231,7 @@ void amdgpu_i2c_init(struct amdgpu_device *adev) if (amdgpu_hw_i2c) DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); - if (adev->is_atom_bios) - amdgpu_atombios_i2c_init(adev); + amdgpu_atombios_i2c_init(adev); } /* remove all the buses */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 216a9572d946..e02a70dd37b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, * to SI 
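The new scratch allocator above collapses the per-slot boolean array into one free_mask word: ffs() locates the lowest free slot in constant time, and freeing is a single OR. The same allocator in plain C, using __builtin_ffs as a stand-in for the kernel's ffs():

#include <stdint.h>
#include <stdio.h>

#define REG_BASE 0x40u		/* arbitrary base for the example */
#define NUM_REG  8

static uint32_t free_mask = (1u << NUM_REG) - 1;	/* all slots free */

static int scratch_get(uint32_t *reg)
{
	int i = __builtin_ffs(free_mask);	/* 1-based index of lowest set bit */

	if (i == 0 || i > NUM_REG)
		return -1;			/* exhausted */
	i--;
	free_mask &= ~(1u << i);
	*reg = REG_BASE + i;
	return 0;
}

static void scratch_free(uint32_t reg)
{
	free_mask |= 1u << (reg - REG_BASE);
}

int main(void)
{
	uint32_t r;

	if (scratch_get(&r) == 0) {
		printf("got scratch reg 0x%02x\n", (unsigned)r);
		scratch_free(r);
	}
	return 0;
}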
there was just a DE IB. */ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ibs, struct dma_fence *last_vm_update, - struct amdgpu_job *job, struct dma_fence **f) + struct amdgpu_ib *ibs, struct amdgpu_job *job, + struct dma_fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; @@ -175,15 +175,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (ring->funcs->emit_hdp_flush) amdgpu_ring_emit_hdp_flush(ring); - /* always set cond_exec_polling to CONTINUE */ - *ring->cond_exe_cpu_addr = 1; - skip_preamble = ring->current_ctx == fence_ctx; need_ctx_switch = ring->current_ctx != fence_ctx; if (job && ring->funcs->emit_cntxcntl) { if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; status |= job->preamble_status; + + if (vm) + status |= AMDGPU_VM_DOMAIN; amdgpu_ring_emit_cntxcntl(ring, status); } @@ -193,7 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* drop preamble IBs if we don't have a context switch */ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble && - !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST)) + !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) && + !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ continue; amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, @@ -223,7 +224,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_ring_patch_cond_exec(ring, patch_offset); ring->current_ctx = fence_ctx; - if (ring->funcs->emit_switch_buffer) + if (vm && ring->funcs->emit_switch_buffer) amdgpu_ring_emit_switch_buffer(ring); amdgpu_ring_commit(ring); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index a0de6286c453..86a12424c162 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -170,8 +170,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, - job->sync.last_vm_update, job, &fence); + r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8aef25828888..61d94c745672 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -60,6 +60,9 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + if (amdgpu_device_is_px(dev)) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); @@ -138,6 +141,9 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) pm_runtime_put_autosuspend(dev->dev); } + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, true); + out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ @@ -569,6 +575,27 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; } } + case AMDGPU_INFO_NUM_HANDLES: { + struct drm_amdgpu_info_num_handles handle; + + switch (info->query_hw_ip.type) { + case AMDGPU_HW_IP_UVD: + /* Starting Polaris, we support unlimited UVD handles */ + if (adev->asic_type < CHIP_POLARIS10) { + handle.uvd_max_handles = adev->uvd.max_handles; + handle.uvd_used_handles = 
amdgpu_uvd_used_handles(adev); + + return copy_to_user(out, &handle, + min((size_t)size, sizeof(handle))) ? -EFAULT : 0; + } else { + return -ENODATA; + } + + break; + default: + return -EINVAL; + } + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->query); return -EINVAL; @@ -628,6 +655,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) goto out_suspend; } + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_map_static_csa(adev, &fpriv->vm); + if (r) + goto out_suspend; + } + mutex_init(&fpriv->bo_list_lock); idr_init(&fpriv->bo_list_handles); @@ -666,6 +699,14 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, amdgpu_uvd_free_handles(adev, file_priv); amdgpu_vce_free_handles(adev, file_priv); + if (amdgpu_sriov_vf(adev)) { + /* TODO: how to handle reserve failure */ + BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false)); + amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va); + fpriv->vm.csa_bo_va = NULL; + amdgpu_bo_unreserve(adev->virt.csa_obj); + } + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b60346792bf8..c12497bd3889 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -595,6 +595,21 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags, uint32_t target); +void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work, + struct amdgpu_bo *new_abo); +int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target, + struct amdgpu_flip_work **work, + struct amdgpu_bo **new_abo); + +void amdgpu_crtc_submit_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct amdgpu_flip_work *work, + struct amdgpu_bo *new_abo); + extern const struct drm_mode_config_funcs amdgpu_mode_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index bf79b73e1538..d1aa291b2638 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -363,11 +363,31 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, bo->flags = flags; +#ifdef CONFIG_X86_32 + /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit + * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 + */ + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; +#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) + /* Don't try to enable write-combining when it can't work, or things + * may be slow + * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 + */ + +#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ + thanks to write-combining + + if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " + "better performance thanks to write-combining\n"); + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; +#else /* For architectures that don't support WC memory, * mask out the WC flag from the BO */ if (!drm_arch_can_wc_memory()) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; +#endif amdgpu_fill_placement_to_bo(bo, placement); /* Kernel allocation are uninterruptible */ @@ -386,6 +406,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, if (unlikely(r != 0)) return r; + bo->tbo.priority = 
ilog2(bo->tbo.num_pages); + if (kernel) + bo->tbo.priority *= 2; + bo->tbo.priority = min(bo->tbo.priority, (unsigned)(TTM_MAX_BO_PRIORITY - 1)); + if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; @@ -408,7 +433,8 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, return 0; fail_unreserve: - ww_mutex_unlock(&bo->tbo.resv->lock); + if (!resv) + ww_mutex_unlock(&bo->tbo.resv->lock); amdgpu_bo_unref(&bo); return r; } @@ -472,7 +498,16 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { + if (!resv) { + r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL); + WARN_ON(r != 0); + } + r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); + + if (!resv) + ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock); + if (r) amdgpu_bo_unref(bo_ptr); } @@ -849,6 +884,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, } void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); @@ -861,6 +897,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, abo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_vm_bo_invalidate(adev, abo); + /* remember the eviction */ + if (evict) + atomic64_inc(&adev->num_evictions); + /* update statistics */ if (!new_mem) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 5cbf59ec0f68..15a723adca76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -114,6 +114,15 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.vma_node); } +/** + * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that + * is accessible to the GPU. 
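The priority assignment above makes TTM evict small user BOs first: the priority grows with ilog2 of the page count, doubles for kernel BOs, and is clamped so it stays a valid index into TTM's fixed priority array. The arithmetic checked in isolation (the TTM_MAX_BO_PRIORITY value below is an assumption for illustration):

#include <stdio.h>

#define TTM_MAX_BO_PRIORITY 16u		/* assumed value, for illustration */

/* integer log2, as the kernel's ilog2() yields for a positive value */
static unsigned ilog2_u(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned bo_priority(unsigned long num_pages, int kernel)
{
	unsigned prio = ilog2_u(num_pages);

	if (kernel)
		prio *= 2;
	if (prio > TTM_MAX_BO_PRIORITY - 1)
		prio = TTM_MAX_BO_PRIORITY - 1;	/* clamp into the LRU array */
	return prio;
}

int main(void)
{
	/* a 4 MiB BO (1024 pages) owned by userspace vs by the kernel */
	printf("user: %u, kernel: %u\n",
	       bo_priority(1024, 0), bo_priority(1024, 1));
	return 0;
}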
+ */ +static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) +{ + return bo->tbo.mem.mem_type != TTM_PL_SYSTEM; +} + int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, @@ -155,7 +164,8 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, size_t buffer_size, uint32_t *metadata_size, uint64_t *flags); void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, - struct ttm_mem_reg *new_mem); + bool evict, + struct ttm_mem_reg *new_mem); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 723ae682bf25..a61882ddc804 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -34,6 +34,28 @@ static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); +static const struct cg_flag_name clocks[] = { + {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, + {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, + {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, + {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, + {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, + {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, + {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, + {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, + {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, + {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, + {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, + {0, NULL}, +}; + void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) { if (adev->pp_enabled) @@ -112,28 +134,23 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; + enum amd_dpm_forced_level level; if ((adev->flags & AMD_IS_PX) && (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return snprintf(buf, PAGE_SIZE, "off\n"); - if (adev->pp_enabled) { - enum amd_dpm_forced_level level; - - level = amdgpu_dpm_get_performance_level(adev); - return snprintf(buf, PAGE_SIZE, "%s\n", - (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : - (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : - (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown"); - } else { - enum amdgpu_dpm_forced_level level; - - level = adev->pm.dpm.forced_level; - return snprintf(buf, PAGE_SIZE, "%s\n", - (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : - (level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); - } + level = amdgpu_dpm_get_performance_level(adev); + return snprintf(buf, PAGE_SIZE, "%s\n", + (level == AMD_DPM_FORCED_LEVEL_AUTO) ? 
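amdgpu_bo_gpu_accessible() above is the predicate behind the new -ERESTARTSYS bail-outs in amdgpu_gem_va_check() and amdgpu_gem_va_update_vm(): a BO (or its shadow) sitting in system RAM must not be mapped, and the submission is retried once validation pulls it back in. A userspace model of the check; ERESTARTSYS is defined locally since it is kernel-internal:

#include <stdbool.h>
#include <stddef.h>

#define ERESTARTSYS 512		/* kernel-internal errno, not in <errno.h> */

enum mem_type { MEM_SYSTEM, MEM_GTT, MEM_VRAM };

struct bo {
	enum mem_type mem;
	struct bo *shadow;	/* optional shadow BO, may be NULL */
};

static bool gpu_accessible(const struct bo *bo)
{
	return bo->mem != MEM_SYSTEM;	/* mirrors mem_type != TTM_PL_SYSTEM */
}

/* Abort with -ERESTARTSYS so the operation is retried after the next
 * command submission validates the buffers back into GTT/VRAM. */
static int va_check(const struct bo *bo)
{
	if (!gpu_accessible(bo))
		return -ERESTARTSYS;
	if (bo->shadow && !gpu_accessible(bo->shadow))
		return -ERESTARTSYS;
	return 0;
}

int main(void)
{
	struct bo b = { MEM_VRAM, NULL };

	return va_check(&b);	/* 0: safe to map */
}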
"auto" : + (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : + (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : + (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : + (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : + "unknown"); } static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, @@ -143,7 +160,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; - enum amdgpu_dpm_forced_level level; + enum amd_dpm_forced_level level; + enum amd_dpm_forced_level current_level; int ret = 0; /* Can't force performance level when the card is off */ @@ -151,19 +169,34 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return -EINVAL; + current_level = amdgpu_dpm_get_performance_level(adev); + if (strncmp("low", buf, strlen("low")) == 0) { - level = AMDGPU_DPM_FORCED_LEVEL_LOW; + level = AMD_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { - level = AMDGPU_DPM_FORCED_LEVEL_HIGH; + level = AMD_DPM_FORCED_LEVEL_HIGH; } else if (strncmp("auto", buf, strlen("auto")) == 0) { - level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + level = AMD_DPM_FORCED_LEVEL_AUTO; } else if (strncmp("manual", buf, strlen("manual")) == 0) { - level = AMDGPU_DPM_FORCED_LEVEL_MANUAL; - } else { + level = AMD_DPM_FORCED_LEVEL_MANUAL; + } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { + level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; + } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { + level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; + } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { + level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; + } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { + level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; + } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { + level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + } else { count = -EINVAL; goto fail; } + if (current_level == level) + return count; + if (adev->pp_enabled) amdgpu_dpm_force_performance_level(adev, level); else { @@ -180,6 +213,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, adev->pm.dpm.forced_level = level; mutex_unlock(&adev->pm.mutex); } + fail: return count; } @@ -1060,9 +1094,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) if (adev->pm.funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { - enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; + enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW); + amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); /* save the user's level */ adev->pm.dpm.forced_level = level; } else { @@ -1351,12 +1385,27 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a return 0; } +static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) +{ + int i; + + for (i = 0; clocks[i].flag; i++) + seq_printf(m, "\t%s: %s\n", clocks[i].name, + (flags & clocks[i].flag) ? 
"On" : "Off"); +} + static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; struct drm_device *ddev = adev->ddev; + u32 flags = 0; + + amdgpu_get_clockgating_state(adev, &flags); + seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); + amdgpu_parse_cg_state(m, flags); + seq_printf(m, "\n"); if (!adev->pm.dpm_enabled) { seq_printf(m, "dpm not enabled\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index 5fd7734f15ca..c19c4d138751 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h @@ -24,6 +24,12 @@ #ifndef __AMDGPU_PM_H__ #define __AMDGPU_PM_H__ +struct cg_flag_name +{ + u32 flag; + const char *name; +}; + int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index fc592c2b0e16..8856eccc37fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -34,97 +34,83 @@ #include "cik_dpm.h" #include "vi_dpm.h" -static int amdgpu_powerplay_init(struct amdgpu_device *adev) +static int amdgpu_create_pp_handle(struct amdgpu_device *adev) { - int ret = 0; + struct amd_pp_init pp_init; struct amd_powerplay *amd_pp; + int ret; amd_pp = &(adev->powerplay); - - if (adev->pp_enabled) { - struct amd_pp_init *pp_init; - - pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL); - - if (pp_init == NULL) - return -ENOMEM; - - pp_init->chip_family = adev->family; - pp_init->chip_id = adev->asic_type; - pp_init->device = amdgpu_cgs_create_device(adev); - ret = amd_powerplay_init(pp_init, amd_pp); - kfree(pp_init); - } else { - amd_pp->pp_handle = (void *)adev; - - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_SI - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: - amd_pp->ip_funcs = &si_dpm_ip_funcs; - break; -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_BONAIRE: - case CHIP_HAWAII: - amd_pp->ip_funcs = &ci_dpm_ip_funcs; - break; - case CHIP_KABINI: - case CHIP_MULLINS: - case CHIP_KAVERI: - amd_pp->ip_funcs = &kv_dpm_ip_funcs; - break; -#endif - case CHIP_CARRIZO: - case CHIP_STONEY: - amd_pp->ip_funcs = &cz_dpm_ip_funcs; - break; - default: - ret = -EINVAL; - break; - } - } - return ret; + pp_init.chip_family = adev->family; + pp_init.chip_id = adev->asic_type; + pp_init.pm_en = amdgpu_dpm != 0 ? true : false; + pp_init.feature_mask = amdgpu_pp_feature_mask; + pp_init.device = amdgpu_cgs_create_device(adev); + ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle)); + if (ret) + return -EINVAL; + return 0; } static int amdgpu_pp_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amd_powerplay *amd_pp; int ret = 0; + amd_pp = &(adev->powerplay); + adev->pp_enabled = false; + amd_pp->pp_handle = (void *)adev; + switch (adev->asic_type) { case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: case CHIP_TONGA: case CHIP_FIJI: case CHIP_TOPAZ: - adev->pp_enabled = true; - break; case CHIP_CARRIZO: case CHIP_STONEY: - adev->pp_enabled = (amdgpu_powerplay == 0) ? 
false : true; + adev->pp_enabled = true; + if (amdgpu_create_pp_handle(adev)) + return -EINVAL; + amd_pp->ip_funcs = &pp_ip_funcs; + amd_pp->pp_funcs = &pp_dpm_funcs; break; /* These chips don't have powerplay implemenations */ +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: + amd_pp->ip_funcs = &si_dpm_ip_funcs; + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: + amd_pp->ip_funcs = &ci_dpm_ip_funcs; + break; case CHIP_KABINI: case CHIP_MULLINS: case CHIP_KAVERI: + amd_pp->ip_funcs = &kv_dpm_ip_funcs; + break; +#endif default: - adev->pp_enabled = false; + ret = -EINVAL; break; } - ret = amdgpu_powerplay_init(adev); - if (ret) - return ret; - if (adev->powerplay.ip_funcs->early_init) ret = adev->powerplay.ip_funcs->early_init( adev->powerplay.pp_handle); + + if (ret == PP_DPM_DISABLED) { + adev->pm.dpm_enabled = false; + return 0; + } return ret; } @@ -184,6 +170,11 @@ static int amdgpu_pp_hw_init(void *handle) ret = adev->powerplay.ip_funcs->hw_init( adev->powerplay.pp_handle); + if (ret == PP_DPM_DISABLED) { + adev->pm.dpm_enabled = false; + return 0; + } + if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev)) adev->pm.dpm_enabled = true; @@ -209,14 +200,14 @@ static void amdgpu_pp_late_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pp_enabled) { - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_fini(adev->powerplay.pp_handle); - } - if (adev->powerplay.ip_funcs->late_fini) adev->powerplay.ip_funcs->late_fini( adev->powerplay.pp_handle); + + if (adev->pp_enabled && adev->pm.dpm_enabled) + amdgpu_pm_sysfs_fini(adev); + + amd_powerplay_destroy(adev->powerplay.pp_handle); } static int amdgpu_pp_suspend(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index a47628395914..7c842b7f1004 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -207,6 +207,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; + /* always set cond_exec_polling to CONTINUE */ + *ring->cond_exe_cpu_addr = 1; r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { @@ -307,7 +309,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, while (size) { if (*pos >= (ring->ring_size + 12)) return result; - + value = ring->ring[(*pos - 12)/4]; r = put_user(value, (uint32_t*)buf); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 574f0b79c690..2345b39878c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -135,6 +135,8 @@ struct amdgpu_ring_funcs { void (*end_use)(struct amdgpu_ring *ring); void (*emit_switch_buffer) (struct amdgpu_ring *ring); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); + void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); + void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); }; struct amdgpu_ring { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index bb964a8ff938..a18ae1e97860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -24,7 +24,7 @@ TRACE_EVENT(amdgpu_mm_rreg, __entry->reg = reg; 
__entry->value = value; ), - TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + TP_printk("0x%04lx, 0x%08lx, 0x%08lx", (unsigned long)__entry->did, (unsigned long)__entry->reg, (unsigned long)__entry->value) @@ -43,7 +43,7 @@ TRACE_EVENT(amdgpu_mm_wreg, __entry->reg = reg; __entry->value = value; ), - TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + TP_printk("0x%04lx, 0x%08lx, 0x%08lx", (unsigned long)__entry->did, (unsigned long)__entry->reg, (unsigned long)__entry->value) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c695b6c55361..1154b0a8881d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -466,10 +466,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, adev = amdgpu_ttm_adev(bo->bdev); - /* remember the eviction */ - if (evict) - atomic64_inc(&adev->num_evictions); - if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { amdgpu_move_null(bo, new_mem); return 0; @@ -552,6 +548,8 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ mem->bus.addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); + if (!mem->bus.addr) + return -ENOMEM; /* * Alpha: Use just the bus offset plus @@ -1052,56 +1050,6 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } -static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - unsigned i, j; - - for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { - struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i]; - - for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) - if (&tbo->lru == lru->lru[j]) - lru->lru[j] = tbo->lru.prev; - - if (&tbo->swap == lru->swap_lru) - lru->swap_lru = tbo->swap.prev; - } -} - -static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - unsigned log2_size = min(ilog2(tbo->num_pages), - AMDGPU_TTM_LRU_SIZE - 1); - - return &adev->mman.log2_size[log2_size]; -} - -static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) -{ - struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo); - struct list_head *res = lru->lru[tbo->mem.mem_type]; - - lru->lru[tbo->mem.mem_type] = &tbo->lru; - while ((++lru)->lru[tbo->mem.mem_type] == res) - lru->lru[tbo->mem.mem_type] = &tbo->lru; - - return res; -} - -static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) -{ - struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo); - struct list_head *res = lru->swap_lru; - - lru->swap_lru = &tbo->swap; - while ((++lru)->swap_lru == res) - lru->swap_lru = &tbo->swap; - - return res; -} - static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { @@ -1140,14 +1088,10 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, - .lru_removal = &amdgpu_ttm_lru_removal, - .lru_tail = &amdgpu_ttm_lru_tail, - .swap_lru_tail = &amdgpu_ttm_swap_lru_tail, }; int amdgpu_ttm_init(struct amdgpu_device *adev) { - unsigned i, j; int r; r = amdgpu_ttm_global_init(adev); @@ -1165,19 +1109,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_ERROR("failed initializing buffer object driver(%d).\n", r); return r; } - - for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { - struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i]; - - for (j = 0; j < 
TTM_NUM_MEM_TYPES; ++j) - lru->lru[j] = &adev->mman.bdev.man[j].lru; - lru->swap_lru = &adev->mman.bdev.glob->swap_lru; - } - - for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) - adev->mman.guard.lru[j] = NULL; - adev->mman.guard.swap_lru = NULL; - adev->mman.initialized = true; r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->mc.real_vram_size >> PAGE_SHIFT); @@ -1365,7 +1296,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, WARN_ON(job->ibs[0].length_dw > num_dw); if (direct_submit) { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, - NULL, NULL, fence); + NULL, fence); job->fence = dma_fence_get(*fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 98ee384f0fca..6bdede8ff12b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -34,13 +34,6 @@ #define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1) #define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2) -#define AMDGPU_TTM_LRU_SIZE 20 - -struct amdgpu_mman_lru { - struct list_head *lru[TTM_NUM_MEM_TYPES]; - struct list_head *swap_lru; -}; - struct amdgpu_mman { struct ttm_bo_global_ref bo_global_ref; struct drm_global_reference mem_global_ref; @@ -58,11 +51,6 @@ struct amdgpu_mman { struct amdgpu_ring *buffer_funcs_ring; /* Scheduler entity for buffer moves */ struct amd_sched_entity entity; - - /* custom LRU management */ - struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; - /* guard for log2_size array, don't add anything in between */ - struct amdgpu_mman_lru guard; }; extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index a81dfaeeb8c0..6f62ac473064 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -65,6 +65,7 @@ #define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin" #define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin" #define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin" +#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin" /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI); MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); +MODULE_FIRMWARE(FIRMWARE_POLARIS12); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_POLARIS11: fw_name = FIRMWARE_POLARIS11; break; + case CHIP_POLARIS12: + fw_name = FIRMWARE_POLARIS12; + break; default: return -EINVAL; } @@ -971,7 +976,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err_free; @@ -1173,3 +1178,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: return r; } + +/** + * amdgpu_uvd_used_handles - returns used UVD handles + * + * @adev: amdgpu_device pointer + * + * Returns the number of UVD handles in use + */ +uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) +{ + unsigned i; + uint32_t used_handles = 0; + + for (i = 0; i < adev->uvd.max_handles; ++i) { + /* + * Handles can be freed in any order, and not + * necessarily linear. So we need to count + * all non-zero handles. 
+ */ + if (atomic_read(&adev->uvd.handles[i])) + used_handles++; + } + + return used_handles; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 6249ba1bde2a..c10682baccae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -38,5 +38,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring); int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout); +uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 69b66b9e7f57..79bc9c7aad45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -52,6 +52,7 @@ #define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" #define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" #define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" +#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI); MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); +MODULE_FIRMWARE(FIRMWARE_POLARIS12); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_POLARIS11: fw_name = FIRMWARE_POLARIS11; break; + case CHIP_POLARIS12: + fw_name = FIRMWARE_POLARIS12; + break; default: return -EINVAL; @@ -450,7 +455,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err; @@ -513,7 +518,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c new file mode 100644 index 000000000000..3fd951c71d1b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -0,0 +1,222 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" + +int amdgpu_allocate_static_csa(struct amdgpu_device *adev) +{ + int r; + void *ptr; + + r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, + &adev->virt.csa_vmid0_addr, &ptr); + if (r) + return r; + + memset(ptr, 0, AMDGPU_CSA_SIZE); + return 0; +} + +/* + * amdgpu_map_static_csa() should be called during amdgpu_vm_init(). + * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE" + * into this VM, and each GFX command submission should use this virtual + * address within its META_DATA init package to support SR-IOV GFX preemption. + */ + +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) +{ + int r; + struct amdgpu_bo_va *bo_va; + struct ww_acquire_ctx ticket; + struct list_head list; + struct amdgpu_bo_list_entry pd; + struct ttm_validate_buffer csa_tv; + + INIT_LIST_HEAD(&list); + INIT_LIST_HEAD(&csa_tv.head); + csa_tv.bo = &adev->virt.csa_obj->tbo; + csa_tv.shared = true; + + list_add(&csa_tv.head, &list); + amdgpu_vm_get_pd_bo(vm, &list, &pd); + + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); + if (r) { + DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r); + return r; + } + + bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); + if (!bo_va) { + ttm_eu_backoff_reservation(&ticket, &list); + DRM_ERROR("failed to create bo_va for static CSA\n"); + return -ENOMEM; + } + + r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); + + if (r) { + DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); + amdgpu_vm_bo_rmv(adev, bo_va); + ttm_eu_backoff_reservation(&ticket, &list); + kfree(bo_va); + return r; + } + + vm->csa_bo_va = bo_va; + ttm_eu_backoff_reservation(&ticket, &list); + return 0; +} + +void amdgpu_virt_init_setting(struct amdgpu_device *adev) +{ + /* enable virtual display */ + adev->mode_info.num_crtc = 1; + adev->enable_virtual_display = true; + + mutex_init(&adev->virt.lock); +} + +uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +{ + signed long r; + uint32_t val; + struct dma_fence *f; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + mutex_lock(&adev->virt.lock); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_hdp_flush(ring); + amdgpu_ring_emit_rreg(ring, reg); + amdgpu_ring_emit_hdp_invalidate(ring); + amdgpu_fence_emit(ring, &f); + amdgpu_ring_commit(ring); + mutex_unlock(&adev->virt.lock); + + r = dma_fence_wait(f, false); + if (r) + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + dma_fence_put(f); + + val = adev->wb.wb[adev->virt.reg_val_offs]; + + return val; +} + +void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +{ + signed long r; + struct dma_fence *f; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_wreg); + + mutex_lock(&adev->virt.lock); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_hdp_flush(ring); + amdgpu_ring_emit_wreg(ring, reg, v); + amdgpu_ring_emit_hdp_invalidate(ring); + amdgpu_fence_emit(ring, &f); + amdgpu_ring_commit(ring); + mutex_unlock(&adev->virt.lock); + + r = dma_fence_wait(f, false); + if (r) + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + dma_fence_put(f); +}
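The KIQ accessors above give SR-IOV guests a safe register path: every privileged read and write is funneled through the kiq ring under adev->virt.lock, with an HDP flush/invalidate pair around the access and a fence wait before the writeback slot is read. A plausible call-site shape, using the amdgpu_sriov_runtime() test added in amdgpu_virt.h below; my_rreg() and its MMIO fallback are illustrative assumptions, not the driver's actual accessor macro:

/* hypothetical read helper: divert to the KIQ path once the VF has
 * given up full GPU access; plain MMIO otherwise */
static uint32_t my_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        if (amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);

        return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}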
+ +/** + * amdgpu_virt_request_full_gpu() - request full gpu access + * @adev: amdgpu_device pointer + * @init: true if called at driver init time + * When starting driver init/fini, full GPU access must be requested first. + * Return: Zero on success, otherwise an error code. + */ +int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init) +{ + struct amdgpu_virt *virt = &adev->virt; + int r; + + if (virt->ops && virt->ops->req_full_gpu) { + r = virt->ops->req_full_gpu(adev, init); + if (r) + return r; + + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + } + + return 0; +} + +/** + * amdgpu_virt_release_full_gpu() - release full gpu access + * @adev: amdgpu_device pointer + * @init: true if called at driver init time + * When finishing driver init/fini, full GPU access must be released again. + * Return: Zero on success, otherwise an error code. + */ +int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init) +{ + struct amdgpu_virt *virt = &adev->virt; + int r; + + if (virt->ops && virt->ops->rel_full_gpu) { + r = virt->ops->rel_full_gpu(adev, init); + if (r) + return r; + + adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME; + } + return 0; +} + +/** + * amdgpu_virt_reset_gpu() - reset gpu + * @adev: amdgpu_device pointer + * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using. + * Return: Zero on success, otherwise an error code. + */ +int amdgpu_virt_reset_gpu(struct amdgpu_device *adev) +{ + struct amdgpu_virt *virt = &adev->virt; + int r; + + if (virt->ops && virt->ops->reset_gpu) { + r = virt->ops->reset_gpu(adev); + if (r) + return r; + + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 2c37a374917f..675e12c42532 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -28,22 +28,48 @@ #define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */ +#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ + +/** + * struct amdgpu_virt_ops - amdgpu device virt operations + */ +struct amdgpu_virt_ops { + int (*req_full_gpu)(struct amdgpu_device *adev, bool init); + int (*rel_full_gpu)(struct amdgpu_device *adev, bool init); + int (*reset_gpu)(struct amdgpu_device *adev); +}; + /* GPU virtualization */ -struct amdgpu_virtualization { - uint32_t virtual_caps; +struct amdgpu_virt { + uint32_t caps; + struct amdgpu_bo *csa_obj; + uint64_t csa_vmid0_addr; + bool chained_ib_support; + uint32_t reg_val_offs; + struct mutex lock; + struct amdgpu_irq_src ack_irq; + struct amdgpu_irq_src rcv_irq; + struct delayed_work flr_work; + const struct amdgpu_virt_ops *ops; }; +#define AMDGPU_CSA_SIZE (8 * 1024) +#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE) + #define amdgpu_sriov_enabled(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) +((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) #define amdgpu_sriov_vf(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF) +((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF) #define amdgpu_sriov_bios(adev) \ -((adev)->virtualization.virtual_caps & 
AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) +((adev)->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS) + +#define amdgpu_sriov_runtime(adev) \ +((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME) #define amdgpu_passthrough(adev) \ -((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE) +((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) static inline bool is_virtual_machine(void) { @@ -54,4 +80,14 @@ static inline bool is_virtual_machine(void) #endif } -#endif
\ No newline at end of file +struct amdgpu_vm; +int amdgpu_allocate_static_csa(struct amdgpu_device *adev); +int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm); +void amdgpu_virt_init_setting(struct amdgpu_device *adev); +uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); +int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); +int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); +int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1dda9321bd5a..bd0d33125c18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1293,7 +1293,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr, uint64_t offset, - uint64_t size, uint32_t flags) + uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->vm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index adbc2f5e5c7f..18c72c0b478d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,6 +111,8 @@ struct amdgpu_vm { /* client id */ u64 client_id; + /* each VM will map on CSA */ + struct amdgpu_bo_va *csa_bo_va; }; struct amdgpu_vm_id { @@ -195,7 +197,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr, uint64_t offset, - uint64_t size, uint32_t flags); + uint64_t size, uint64_t flags); int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr); diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index c32eca26155c..2af26d2da127 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -181,9 +181,6 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode if (!amdgpu_encoder->enc_priv) return; - if (!adev->is_atom_bios) - return; - if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; @@ -236,9 +233,6 @@ amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder) if (!amdgpu_encoder->enc_priv) return; - if (!adev->is_atom_bios) - return; - if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index bda9e3de191e..9498e78b90d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -889,7 +889,16 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) pi->uvd_power_gated = gate; - ci_update_uvd_dpm(adev, gate); + if (gate) { + /* stop the UVD block */ + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + ci_update_uvd_dpm(adev, gate); + } else { + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); + ci_update_uvd_dpm(adev, gate); + } } static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev) @@ -4336,13 +4345,13 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev, static int 
ci_dpm_force_performance_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) + enum amd_dpm_forced_level level) { struct ci_power_info *pi = ci_get_pi(adev); u32 tmp, levels, i; int ret; - if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + if (level == AMD_DPM_FORCED_LEVEL_HIGH) { if ((!pi->pcie_dpm_key_disabled) && pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { levels = 0; @@ -4403,7 +4412,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev, } } } - } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { if ((!pi->sclk_dpm_key_disabled) && pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { levels = ci_get_lowest_enabled_level(adev, @@ -4452,7 +4461,7 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev, udelay(1); } } - } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { if (!pi->pcie_dpm_key_disabled) { PPSMC_Result smc_result; @@ -6262,7 +6271,7 @@ static int ci_dpm_sw_init(void *handle) /* default to balanced state */ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; adev->pm.default_sclk = adev->clock.default_sclk; adev->pm.default_mclk = adev->clock.default_mclk; adev->pm.current_sclk = adev->clock.default_sclk; @@ -6571,8 +6580,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev, { struct ci_power_info *pi = ci_get_pi(adev); - if (adev->pm.dpm.forced_level - != AMDGPU_DPM_FORCED_LEVEL_MANUAL) + if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { @@ -6739,12 +6749,3 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; } - -const struct amdgpu_ip_block_version ci_dpm_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 7, - .minor = 0, - .rev = 0, - .funcs = &ci_dpm_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 302df85893ab..7da688b0d27d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1627,14 +1627,13 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev) static void cik_detect_hw_virtualization(struct amdgpu_device *adev) { if (is_virtual_machine()) /* passthrough mode */ - adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, .read_bios_from_rom = &cik_read_bios_from_rom, - .detect_hw_virtualization = cik_detect_hw_virtualization, .read_register = &cik_read_register, .reset = &cik_asic_reset, .set_vga_state = &cik_vga_set_state, @@ -1890,6 +1889,8 @@ static const struct amdgpu_ip_block_version cik_common_ip_block = int cik_set_ip_blocks(struct amdgpu_device *adev) { + cik_detect_hw_virtualization(adev); + switch (adev->asic_type) { case CHIP_BONAIRE: amdgpu_ip_block_add(adev, &cik_common_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 4c34dbc7a254..810bba533975 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -651,7 +651,7 @@ static int 
cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err1; diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h index 66e39cdb5cb0..66e39cdb5cb0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/si/clearstate_si.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c deleted file mode 100644 index ba2b66be9022..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ /dev/null @@ -1,2320 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
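A change repeated across the amdgpu hunks in this merge is the amdgpu_ib_schedule() signature update: one fence parameter is dropped, so direct-submit test paths now pass a single NULL before the output fence. The new call shape in isolation (timeout handling condensed; what the remaining NULL binds to is not shown in these hunks):

struct dma_fence *f = NULL;
long r;

/* was: amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f) */
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (!r) {
        r = dma_fence_wait_timeout(f, false, timeout);
        dma_fence_put(f);
}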
- * - */ - -#include <linux/firmware.h> -#include <linux/seq_file.h> -#include "drmP.h" -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_atombios.h" -#include "vid.h" -#include "vi_dpm.h" -#include "amdgpu_dpm.h" -#include "cz_dpm.h" -#include "cz_ppsmc.h" -#include "atom.h" - -#include "smu/smu_8_0_d.h" -#include "smu/smu_8_0_sh_mask.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" -#include "gmc/gmc_8_1_d.h" -#include "bif/bif_5_1_d.h" -#include "gfx_v8_0.h" - -static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate); -static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); -static void cz_dpm_fini(struct amdgpu_device *adev); - -static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps) -{ - struct cz_ps *ps = rps->ps_priv; - - return ps; -} - -static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev, - uint16_t voltage) -{ - uint16_t tmp = 6200 - voltage * 25; - - return tmp; -} - -static void cz_construct_max_power_limits_table(struct amdgpu_device *adev, - struct amdgpu_clock_and_voltage_limits *table) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (dep_table->count > 0) { - table->sclk = dep_table->entries[dep_table->count - 1].clk; - table->vddc = cz_convert_8bit_index_to_voltage(adev, - dep_table->entries[dep_table->count - 1].v); - } - - table->mclk = pi->sys_info.nbp_memory_clock[0]; - -} - -union igp_info { - struct _ATOM_INTEGRATED_SYSTEM_INFO info; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; -}; - -static int cz_parse_sys_info_table(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - union igp_info *igp_info; - u8 frev, crev; - u16 data_offset; - int i = 0; - - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - igp_info = (union igp_info *)(mode_info->atom_context->bios + - data_offset); - - if (crev != 9) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); - return -EINVAL; - } - pi->sys_info.bootup_sclk = - le32_to_cpu(igp_info->info_9.ulBootUpEngineClock); - pi->sys_info.bootup_uma_clk = - le32_to_cpu(igp_info->info_9.ulBootUpUMAClock); - pi->sys_info.dentist_vco_freq = - le32_to_cpu(igp_info->info_9.ulDentistVCOFreq); - pi->sys_info.bootup_nb_voltage_index = - le16_to_cpu(igp_info->info_9.usBootUpNBVoltage); - - if (igp_info->info_9.ucHtcTmpLmt == 0) - pi->sys_info.htc_tmp_lmt = 203; - else - pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt; - - if (igp_info->info_9.ucHtcHystLmt == 0) - pi->sys_info.htc_hyst_lmt = 5; - else - pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt; - - if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); - return -EINVAL; - } - - if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) && - pi->enable_nb_ps_policy) - pi->sys_info.nb_dpm_enable = true; - else - pi->sys_info.nb_dpm_enable = false; - - for (i = 0; i < CZ_NUM_NBPSTATES; i++) { - if (i < CZ_NUM_NBPMEMORY_CLOCK) - pi->sys_info.nbp_memory_clock[i] 
= - le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]); - pi->sys_info.nbp_n_clock[i] = - le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]); - } - - for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++) - pi->sys_info.display_clock[i] = - le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK); - - for (i = 0; i < CZ_NUM_NBPSTATES; i++) - pi->sys_info.nbp_voltage_index[i] = - le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]); - - if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) - pi->caps_enable_dfs_bypass = true; - - pi->sys_info.uma_channel_number = - igp_info->info_9.ucUMAChannelNumber; - - cz_construct_max_power_limits_table(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); - } - - return 0; -} - -static void cz_patch_voltage_values(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (uvd_table->count) { - for (i = 0; i < uvd_table->count; i++) - uvd_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - uvd_table->entries[i].v); - } - - if (vce_table->count) { - for (i = 0; i < vce_table->count; i++) - vce_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - vce_table->entries[i].v); - } - - if (acp_table->count) { - for (i = 0; i < acp_table->count; i++) - acp_table->entries[i].v = - cz_convert_8bit_index_to_voltage(adev, - acp_table->entries[i].v); - } - -} - -static void cz_construct_boot_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->boot_pl.sclk = pi->sys_info.bootup_sclk; - pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; - pi->boot_pl.ds_divider_index = 0; - pi->boot_pl.ss_divider_index = 0; - pi->boot_pl.allow_gnb_slow = 1; - pi->boot_pl.force_nbp_state = 0; - pi->boot_pl.display_wm = 0; - pi->boot_pl.vce_wm = 0; - -} - -static void cz_patch_boot_state(struct amdgpu_device *adev, - struct cz_ps *ps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - ps->num_levels = 1; - ps->levels[0] = pi->boot_pl; -} - -union pplib_clock_info { - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; - struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo; -}; - -static void cz_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - struct cz_pl *pl = &ps->levels[index]; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - pl->sclk = table->entries[clock_info->carrizo.index].clk; - pl->vddc_index = table->entries[clock_info->carrizo.index].v; - - ps->num_levels = index + 1; - - if (pi->caps_sclk_ds) { - pl->ds_divider_index = 5; - pl->ss_divider_index = 5; - } - -} - -static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - struct cz_ps *ps = cz_get_ps(rps); - - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = 
le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - adev->pm.dpm.boot_ps = rps; - cz_patch_boot_state(adev, ps); - } - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; - -} - -union power_info { - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static int cz_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct cz_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * - state_array->ucNumEntries, GFP_KERNEL); - - if (!adev->pm.dpm.ps) - return -ENOMEM; - - power_state_offset = (u8 *)state_array->states; - adev->pm.dpm.platform_caps = - le32_to_cpu(power_info->pplib.ulPlatformCaps); - adev->pm.dpm.backbias_response_time = - le16_to_cpu(power_info->pplib.usBackbiasTime); - adev->pm.dpm.voltage_response_time = - le16_to_cpu(power_info->pplib.usVoltageTime); - - for (i = 0; i < state_array->ucNumEntries; i++) { - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - - ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL); - if (ps == NULL) { - for (j = 0; j < i; j++) - kfree(adev->pm.dpm.ps[j].ps_priv); - kfree(adev->pm.dpm.ps); - return -ENOMEM; - } - - adev->pm.dpm.ps[i].ps_priv = ps; - k = 0; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= CZ_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * - clock_info_array->ucEntrySize]; - cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i], - k, clock_info); - k++; - } - cz_parse_pplib_non_clock_info(adev, 
&adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - return 0; -} - -static int cz_process_firmware_header(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - u32 tmp; - int ret; - - ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, - DpmTable), - &tmp, pi->sram_end); - - if (ret == 0) - pi->dpm_table_start = tmp; - - return ret; -} - -static int cz_dpm_init(struct amdgpu_device *adev) -{ - struct cz_power_info *pi; - int ret, i; - - pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL); - if (NULL == pi) - return -ENOMEM; - - adev->pm.dpm.priv = pi; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - goto err; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - goto err; - - pi->sram_end = SMC_RAM_END; - - /* set up DPM defaults */ - for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) - pi->active_target[i] = CZ_AT_DFLT; - - pi->mgcg_cgtt_local0 = 0x0; - pi->mgcg_cgtt_local1 = 0x0; - pi->clock_slow_down_step = 25000; - pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = false; - pi->caps_power_containment = true; - pi->caps_cac = true; - pi->didt_enabled = false; - if (pi->didt_enabled) { - pi->caps_sq_ramping = true; - pi->caps_db_ramping = true; - pi->caps_td_ramping = true; - pi->caps_tcp_ramping = true; - } - if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) - pi->caps_sclk_ds = true; - else - pi->caps_sclk_ds = false; - - pi->voting_clients = 0x00c00033; - pi->auto_thermal_throttling_enabled = true; - pi->bapm_enabled = false; - pi->disable_nb_ps3_in_battery = false; - pi->voltage_drop_threshold = 0; - pi->caps_sclk_throttle_low_notification = false; - pi->gfx_pg_threshold = 500; - pi->caps_fps = true; - /* uvd */ - pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; - pi->caps_uvd_dpm = true; - /* vce */ - pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; - pi->caps_vce_dpm = true; - /* acp */ - pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; - pi->caps_acp_dpm = true; - - pi->caps_stable_power_state = false; - pi->nb_dpm_enabled_by_driver = true; - pi->nb_dpm_enabled = false; - pi->caps_voltage_island = false; - /* flags which indicate need to upload pptable */ - pi->need_pptable_upload = true; - - ret = cz_parse_sys_info_table(adev); - if (ret) - goto err; - - cz_patch_voltage_values(adev); - cz_construct_boot_state(adev); - - ret = cz_parse_power_table(adev); - if (ret) - goto err; - - ret = cz_process_firmware_header(adev); - if (ret) - goto err; - - pi->dpm_enabled = true; - pi->uvd_dynamic_pg = false; - - return 0; -err: - cz_dpm_fini(adev); - return ret; -} - -static void cz_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - kfree(adev->pm.dpm.ps[i].ps_priv); - - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - amdgpu_free_extended_power_table(adev); -} - -#define ixSMUSVI_NB_CURRENTVID 0xD8230044 -#define CURRENT_NB_VID_MASK 0xff000000 -#define CURRENT_NB_VID__SHIFT 24 -#define ixSMUSVI_GFX_CURRENTVID 0xD8230048 -#define CURRENT_GFX_VID_MASK 0xff000000 -#define CURRENT_GFX_VID__SHIFT 24 - -static void -cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, - struct seq_file *m) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), - TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); - u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); - u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), - TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); - u32 sclk, vclk, dclk, ecclk, tmp; - u16 vddnb, vddgfx; - - if (sclk_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); - } else { - sclk = table->entries[sclk_index].clk; - seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); - } - - tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & - CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; - vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & - CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; - vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); - - seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); - if (!pi->uvd_power_gated) { - if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); - } else { - vclk = uvd_table->entries[uvd_index].vclk; - dclk = uvd_table->entries[uvd_index].dclk; - seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); - } - } - - seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); - if (!pi->vce_power_gated) { - if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { - seq_printf(m, "invalid vce dpm level %d\n", vce_index); - } else { - ecclk = vce_table->entries[vce_index].ecclk; - seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); - } - } -} - -static void cz_dpm_print_power_state(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - int i; - struct cz_ps *ps = cz_get_ps(rps); - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->num_levels; i++) { - struct cz_pl *pl = &ps->levels[i]; - - DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - cz_convert_8bit_index_to_voltage(adev, pl->vddc_index)); - } - - amdgpu_dpm_print_ps_status(adev, rps); -} - -static void cz_dpm_set_funcs(struct amdgpu_device *adev); - -static int cz_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - cz_dpm_set_funcs(adev); - - return 0; -} - - -static int cz_dpm_late_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (amdgpu_dpm) { - int ret; - /* init the sysfs and debugfs files late */ - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - return ret; - - /* powerdown unused blocks for now */ - cz_dpm_powergate_uvd(adev, true); - cz_dpm_powergate_vce(adev, true); - } - - return 0; -} - -static int cz_dpm_sw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; - /* fix me to add thermal support TODO */ - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - mutex_lock(&adev->pm.mutex); - ret = cz_dpm_init(adev); - if (ret) - goto dpm_init_failed; - - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_init_failed: - cz_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - - return ret; -} - -static int cz_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - amdgpu_pm_sysfs_fini(adev); - cz_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static void cz_reset_ap_mask(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->active_process_mask = 0; -} - -static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev, - void **table) -{ - return cz_smu_download_pptable(adev, table); -} - -static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct SMU8_Fusion_ClkTable *clock_table; - struct atom_clock_dividers dividers; - void *table = NULL; - uint8_t i = 0; - int ret = 0; - - struct amdgpu_clock_voltage_dependency_table *vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_clock_voltage_dependency_table *vddgfx_table = - 
&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (!pi->need_pptable_upload) - return 0; - - ret = cz_dpm_download_pptable_from_smu(adev, &table); - if (ret) { - DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n"); - return -EINVAL; - } - - clock_table = (struct SMU8_Fusion_ClkTable *)table; - /* patch clock table */ - if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS || - acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n"); - return -EINVAL; - } - - for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { - - /* vddc sclk */ - clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = - (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = - (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* vddgfx sclk */ - clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = - (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0; - - /* acp breakdown */ - clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = - (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; - clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = - (i < acp_table->count) ? acp_table->entries[i].clk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* uvd breakdown */ - clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = - (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; - clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = - (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = - (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; - clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = - (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - - /* vce breakdown */ - clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = - (i < vce_table->count) ? 
(uint8_t)vce_table->entries[i].v : 0; - clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = - (i < vce_table->count) ? vce_table->entries[i].ecclk : 0; - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, - false, ÷rs); - if (ret) - return ret; - clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = - (uint8_t)dividers.post_divider; - } - - /* its time to upload to SMU */ - ret = cz_smu_upload_pptable(adev); - if (ret) { - DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n"); - return ret; - } - - return 0; -} - -static void cz_init_sclk_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->sclk_dpm.soft_min_clk = 0; - pi->sclk_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].clk; - } else { - DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].clk; - } - - pi->sclk_dpm.soft_max_clk = clock; - pi->sclk_dpm.hard_max_clk = clock; - -} - -static void cz_init_uvd_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->uvd_dpm.soft_min_clk = 0; - pi->uvd_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].vclk; - } else { - DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].vclk; - } - - pi->uvd_dpm.soft_max_clk = clock; - pi->uvd_dpm.hard_max_clk = clock; - -} - -static void cz_init_vce_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->vce_dpm.soft_min_clk = table->entries[0].ecclk; - pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); - level = cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].ecclk; - } else { - /* future BIOS would fix this error */ - DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].ecclk; - } - - pi->vce_dpm.soft_max_clk = clock; - pi->vce_dpm.hard_max_clk = clock; - -} - -static void cz_init_acp_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - uint32_t clock = 0, level; - - if (!table || !table->count) { - DRM_ERROR("Invalid Voltage Dependency table.\n"); - return; - } - - pi->acp_dpm.soft_min_clk = 0; - pi->acp_dpm.hard_min_clk = 0; - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel); - level = 
cz_get_argument(adev); - if (level < table->count) { - clock = table->entries[level].clk; - } else { - DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n"); - clock = table->entries[table->count - 1].clk; - } - - pi->acp_dpm.soft_max_clk = clock; - pi->acp_dpm.hard_max_clk = clock; - -} - -static void cz_init_pg_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->uvd_power_gated = false; - pi->vce_power_gated = false; - pi->acp_power_gated = false; - -} - -static void cz_init_sclk_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - pi->low_sclk_interrupt_threshold = 0; -} - -static void cz_dpm_setup_asic(struct amdgpu_device *adev) -{ - cz_reset_ap_mask(adev); - cz_dpm_upload_pptable_to_smu(adev); - cz_init_sclk_limit(adev); - cz_init_uvd_limit(adev); - cz_init_vce_limit(adev); - cz_init_acp_limit(adev); - cz_init_pg_state(adev); - cz_init_sclk_threshold(adev); - -} - -static bool cz_check_smu_feature(struct amdgpu_device *adev, - uint32_t feature) -{ - uint32_t smu_feature = 0; - int ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_GetFeatureStatus, 0); - if (ret) { - DRM_ERROR("Failed to get SMU features from SMC.\n"); - return false; - } else { - smu_feature = cz_get_argument(adev); - if (feature & smu_feature) - return true; - } - - return false; -} - -static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev) -{ - if (cz_check_smu_feature(adev, - SMU_EnabledFeatureScoreboard_SclkDpmOn)) - return true; - - return false; -} - -static void cz_program_voting_clients(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); -} - -static void cz_clear_voting_clients(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); -} - -static int cz_start_dpm(struct amdgpu_device *adev) -{ - int ret = 0; - - if (amdgpu_dpm) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK); - if (ret) { - DRM_ERROR("SMU feature: SCLK_DPM enable failed\n"); - return -EINVAL; - } - } - - return 0; -} - -static int cz_stop_dpm(struct amdgpu_device *adev) -{ - int ret = 0; - - if (amdgpu_dpm && adev->pm.dpm_enabled) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK); - if (ret) { - DRM_ERROR("SMU feature: SCLK_DPM disable failed\n"); - return -EINVAL; - } - } - - return 0; -} - -static uint32_t cz_get_sclk_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - switch (msg) { - case PPSMC_MSG_SetSclkSoftMin: - case PPSMC_MSG_SetSclkHardMin: - for (i = 0; i < table->count; i++) - if (clock <= table->entries[i].clk) - break; - if (i == table->count) - i = table->count - 1; - break; - case PPSMC_MSG_SetSclkSoftMax: - case PPSMC_MSG_SetSclkHardMax: - for (i = table->count - 1; i >= 0; i--) - if (clock >= table->entries[i].clk) - break; - if (i < 0) - i = 0; - break; - default: - break; - } - - return i; -} - -static uint32_t cz_get_eclk_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - if (table->count == 0) - return 0; - - switch (msg) { - case PPSMC_MSG_SetEclkSoftMin: - case PPSMC_MSG_SetEclkHardMin: - for (i = 0; i < table->count-1; i++) - if (clock <= 
table->entries[i].ecclk) - break; - break; - case PPSMC_MSG_SetEclkSoftMax: - case PPSMC_MSG_SetEclkHardMax: - for (i = table->count - 1; i > 0; i--) - if (clock >= table->entries[i].ecclk) - break; - break; - default: - break; - } - - return i; -} - -static uint32_t cz_get_uvd_level(struct amdgpu_device *adev, - uint32_t clock, uint16_t msg) -{ - int i = 0; - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - - switch (msg) { - case PPSMC_MSG_SetUvdSoftMin: - case PPSMC_MSG_SetUvdHardMin: - for (i = 0; i < table->count; i++) - if (clock <= table->entries[i].vclk) - break; - if (i == table->count) - i = table->count - 1; - break; - case PPSMC_MSG_SetUvdSoftMax: - case PPSMC_MSG_SetUvdHardMax: - for (i = table->count - 1; i >= 0; i--) - if (clock >= table->entries[i].vclk) - break; - if (i < 0) - i = 0; - break; - default: - break; - } - - return i; -} - -static int cz_program_bootup_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - uint32_t soft_min_clk = 0; - uint32_t soft_max_clk = 0; - int ret = 0; - - pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk; - pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk; - - soft_min_clk = cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin); - soft_max_clk = cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, soft_min_clk); - if (ret) - return -EINVAL; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, soft_max_clk); - if (ret) - return -EINVAL; - - return 0; -} - -/* TODO */ -static int cz_disable_cgpg(struct amdgpu_device *adev) -{ - return 0; -} - -/* TODO */ -static int cz_enable_cgpg(struct amdgpu_device *adev) -{ - return 0; -} - -/* TODO */ -static int cz_program_pt_config_registers(struct amdgpu_device *adev) -{ - return 0; -} - -static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - uint32_t reg = 0; - - if (pi->caps_sq_ramping) { - reg = RREG32_DIDT(ixDIDT_SQ_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_SQ_CTRL0, reg); - } - if (pi->caps_db_ramping) { - reg = RREG32_DIDT(ixDIDT_DB_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_DB_CTRL0, reg); - } - if (pi->caps_td_ramping) { - reg = RREG32_DIDT(ixDIDT_TD_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_TD_CTRL0, reg); - } - if (pi->caps_tcp_ramping) { - reg = RREG32_DIDT(ixDIDT_TCP_CTRL0); - if (enable) - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); - else - reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); - WREG32_DIDT(ixDIDT_TCP_CTRL0, reg); - } - -} - -static int cz_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret; - - if (pi->caps_sq_ramping || pi->caps_db_ramping || - pi->caps_td_ramping || pi->caps_tcp_ramping) { - if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { - ret = cz_disable_cgpg(adev); - if (ret) { - DRM_ERROR("Pre Di/Dt disable cg/pg failed\n"); - return -EINVAL; - } - 
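cz_do_enable_didt() above repeats one read-modify-write idiom per DIDT block: read the control register, rewrite a single named bitfield, write the value back. The pattern in isolation, with the register and field names taken from the deleted code:

u32 reg;

/* toggle one bitfield without disturbing the rest of the register */
reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, enable ? 1 : 0);
WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);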
adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE; - } - - ret = cz_program_pt_config_registers(adev); - if (ret) { - DRM_ERROR("Di/Dt config failed\n"); - return -EINVAL; - } - cz_do_enable_didt(adev, enable); - - if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) { - ret = cz_enable_cgpg(adev); - if (ret) { - DRM_ERROR("Post Di/Dt enable cg/pg failed\n"); - return -EINVAL; - } - adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; - } - } - - return 0; -} - -/* TODO */ -static void cz_reset_acp_boot_level(struct amdgpu_device *adev) -{ -} - -static void cz_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - - pi->current_ps = *ps; - pi->current_rps = *rps; - pi->current_rps.ps_priv = &pi->current_ps; - adev->pm.dpm.current_ps = &pi->current_rps; - -} - -static void cz_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = cz_get_ps(rps); - - pi->requested_ps = *ps; - pi->requested_rps = *rps; - pi->requested_rps.ps_priv = &pi->requested_ps; - adev->pm.dpm.requested_ps = &pi->requested_rps; - -} - -/* PP arbiter support needed TODO */ -static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps) -{ - struct cz_ps *ps = cz_get_ps(new_rps); - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_and_voltage_limits *limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - /* 10kHz memory clock */ - uint32_t mclk = 0; - - ps->force_high = false; - ps->need_dfs_bypass = true; - pi->video_start = new_rps->dclk || new_rps->vclk || - new_rps->evclk || new_rps->ecclk; - - if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) - pi->battery_state = true; - else - pi->battery_state = false; - - if (pi->caps_stable_power_state) - mclk = limits->mclk; - - if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1]) - ps->force_high = true; - -} - -static int cz_dpm_enable(struct amdgpu_device *adev) -{ - const char *chip_name; - int ret = 0; - - /* renable will hang up SMU, so check first */ - if (cz_check_for_dpm_enabled(adev)) - return -EINVAL; - - cz_program_voting_clients(adev); - - switch (adev->asic_type) { - case CHIP_CARRIZO: - chip_name = "carrizo"; - break; - case CHIP_STONEY: - chip_name = "stoney"; - break; - default: - BUG(); - } - - - ret = cz_start_dpm(adev); - if (ret) { - DRM_ERROR("%s DPM enable failed\n", chip_name); - return -EINVAL; - } - - ret = cz_program_bootup_state(adev); - if (ret) { - DRM_ERROR("%s bootup state program failed\n", chip_name); - return -EINVAL; - } - - ret = cz_enable_didt(adev, true); - if (ret) { - DRM_ERROR("%s enable di/dt failed\n", chip_name); - return -EINVAL; - } - - cz_reset_acp_boot_level(adev); - cz_update_current_ps(adev, adev->pm.dpm.boot_ps); - - return 0; -} - -static int cz_dpm_hw_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; - - mutex_lock(&adev->pm.mutex); - - /* smu init only needs to be called at startup, not resume. - * It should be in sw_init, but requires the fw info gathered - * in sw_init from other IP modules. 
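The comment just above explains the asymmetry in this file's power-management hooks: cz_smu_init() depends on firmware information gathered by other IP blocks during sw_init, so SMU state is created once in hw_init, while resume only restarts the already-initialized SMU. Condensed, the two paths differ by a single call (a sketch reusing the function names from this file):

/* hw_init: create SMU state once, then load and start the firmware */
ret = cz_smu_init(adev);
if (!ret)
        ret = cz_smu_start(adev);

/* resume: SMU state survived suspend, only the firmware is restarted */
ret = cz_smu_start(adev);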
- */ - ret = cz_smu_init(adev); - if (ret) { - DRM_ERROR("amdgpu: smc initialization failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* do the actual fw loading */ - ret = cz_smu_start(adev); - if (ret) { - DRM_ERROR("amdgpu: smc start failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - if (!amdgpu_dpm) { - adev->pm.dpm_enabled = false; - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* cz dpm setup asic */ - cz_dpm_setup_asic(adev); - - /* cz dpm enable */ - ret = cz_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int cz_dpm_disable(struct amdgpu_device *adev) -{ - int ret = 0; - - if (!cz_check_for_dpm_enabled(adev)) - return -EINVAL; - - ret = cz_enable_didt(adev, false); - if (ret) { - DRM_ERROR("disable di/dt failed\n"); - return -EINVAL; - } - - /* powerup blocks */ - cz_dpm_powergate_uvd(adev, false); - cz_dpm_powergate_vce(adev, false); - - cz_clear_voting_clients(adev); - cz_stop_dpm(adev); - cz_update_current_ps(adev, adev->pm.dpm.boot_ps); - - return 0; -} - -static int cz_dpm_hw_fini(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* smu fini only needs to be called at teardown, not suspend. - * It should be in sw_fini, but we put it here for symmetry - * with smu init. - */ - cz_smu_fini(adev); - - if (adev->pm.dpm_enabled) { - ret = cz_dpm_disable(adev); - - adev->pm.dpm.current_ps = - adev->pm.dpm.requested_ps = - adev->pm.dpm.boot_ps; - } - - adev->pm.dpm_enabled = false; - - mutex_unlock(&adev->pm.mutex); - - return ret; -} - -static int cz_dpm_suspend(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - - ret = cz_dpm_disable(adev); - - adev->pm.dpm.current_ps = - adev->pm.dpm.requested_ps = - adev->pm.dpm.boot_ps; - - mutex_unlock(&adev->pm.mutex); - } - - return ret; -} - -static int cz_dpm_resume(void *handle) -{ - int ret = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - mutex_lock(&adev->pm.mutex); - - /* do the actual fw loading */ - ret = cz_smu_start(adev); - if (ret) { - DRM_ERROR("amdgpu: smc start failed\n"); - mutex_unlock(&adev->pm.mutex); - return ret; - } - - if (!amdgpu_dpm) { - adev->pm.dpm_enabled = false; - mutex_unlock(&adev->pm.mutex); - return ret; - } - - /* cz dpm setup asic */ - cz_dpm_setup_asic(adev); - - /* cz dpm enable */ - ret = cz_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - - mutex_unlock(&adev->pm.mutex); - /* upon resume, re-compute the clocks */ - if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); - - return 0; -} - -static int cz_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int cz_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -static int cz_dpm_get_temperature(struct amdgpu_device *adev) -{ - int actual_temp = 0; - uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP); - uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP); - - if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL)) - actual_temp = 1000 * ((temp / 8) - 49); - else - actual_temp = 1000 * (temp / 8); - - return actual_temp; -} - -static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = 
cz_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - cz_update_requested_ps(adev, new_ps); - cz_apply_state_adjust_rules(adev, &pi->requested_rps, - &pi->current_rps); - - return 0; -} - -static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_and_voltage_limits *limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - uint32_t clock, stable_ps_clock = 0; - - clock = pi->sclk_dpm.soft_min_clk; - - if (pi->caps_stable_power_state) { - stable_ps_clock = limits->sclk * 75 / 100; - if (clock < stable_ps_clock) - clock = stable_ps_clock; - } - - if (clock != pi->sclk_dpm.soft_min_clk) { - pi->sclk_dpm.soft_min_clk = clock; - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, clock, - PPSMC_MSG_SetSclkSoftMin)); - } - - if (pi->caps_stable_power_state && - pi->sclk_dpm.soft_max_clk != clock) { - pi->sclk_dpm.soft_max_clk = clock; - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, clock, - PPSMC_MSG_SetSclkSoftMax)); - } else { - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - } - - return 0; -} - -static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (pi->caps_sclk_ds) { - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetMinDeepSleepSclk, - CZ_MIN_DEEP_SLEEP_SCLK); - } - - return 0; -} - -/* ?? without dal support, is this still needed in setpowerstate list*/ -static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetWatermarkFrequency, - pi->sclk_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) -{ - int ret = 0; - struct cz_power_info *pi = cz_get_pi(adev); - - /* also depend on dal NBPStateDisableRequired */ - if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) { - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, - NB_DPM_MASK); - if (ret) { - DRM_ERROR("amdgpu: nb dpm enable failed\n"); - return ret; - } - pi->nb_dpm_enabled = true; - } - - return ret; -} - -static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, - bool enable) -{ - if (enable) - cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate); - else - cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate); - -} - -static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *ps = &pi->requested_ps; - - if (pi->sys_info.nb_dpm_enable) { - if (ps->force_high) - cz_dpm_nbdpm_lm_pstate_enable(adev, false); - else - cz_dpm_nbdpm_lm_pstate_enable(adev, true); - } - - return 0; -} - -/* with dpm enabled */ -static int cz_dpm_set_power_state(struct amdgpu_device *adev) -{ - cz_dpm_update_sclk_limit(adev); - cz_dpm_set_deep_sleep_sclk_threshold(adev); - cz_dpm_set_watermark_threshold(adev); - cz_dpm_enable_nbdpm(adev); - cz_dpm_update_low_memory_pstate(adev); - - return 0; -} - -static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_ps *ps = &pi->requested_rps; - - cz_update_current_ps(adev, ps); -} - -static int cz_dpm_force_highest(struct 
amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) { - pi->sclk_dpm.soft_min_clk = - pi->sclk_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) { - pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_sclk_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); - pi->max_sclk_level = cz_get_argument(adev) + 1; - } - - if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max sclk level!\n"); - return -EINVAL; - } - - return pi->max_sclk_level; -} - -static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - uint32_t level = 0; - int ret = 0; - - pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk; - level = cz_dpm_get_max_sclk_level(adev) - 1; - if (level < dep_table->count) - pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; - else - pi->sclk_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].clk; - - /* get min/max sclk soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMin, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_min_clk, - PPSMC_MSG_SetSclkSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetSclkSoftMax, - cz_get_sclk_level(adev, - pi->sclk_dpm.soft_max_clk, - PPSMC_MSG_SetSclkSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM unforce state min=%d, max=%d.\n", - pi->sclk_dpm.soft_min_clk, - pi->sclk_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) { - pi->uvd_dpm.soft_min_clk = - pi->uvd_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMin, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_min_clk, - PPSMC_MSG_SetUvdSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) { - pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMax, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_max_clk, - PPSMC_MSG_SetUvdSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_uvd_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); - pi->max_uvd_level = 
cz_get_argument(adev) + 1; - } - - if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max uvd level!\n"); - return -EINVAL; - } - - return pi->max_uvd_level; -} - -static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - uint32_t level = 0; - int ret = 0; - - pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk; - level = cz_dpm_get_max_uvd_level(adev) - 1; - if (level < dep_table->count) - pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk; - else - pi->uvd_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].vclk; - - /* get min/max uvd soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMin, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_min_clk, - PPSMC_MSG_SetUvdSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetUvdSoftMax, - cz_get_uvd_level(adev, - pi->uvd_dpm.soft_max_clk, - PPSMC_MSG_SetUvdSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n", - pi->uvd_dpm.soft_min_clk, - pi->uvd_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_vce_force_highest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->vce_dpm.soft_min_clk != pi->vce_dpm.soft_max_clk) { - pi->vce_dpm.soft_min_clk = - pi->vce_dpm.soft_max_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMin, - cz_get_eclk_level(adev, - pi->vce_dpm.soft_min_clk, - PPSMC_MSG_SetEclkSoftMin)); - if (ret) - return ret; - } - - return ret; -} - -static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) { - pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk; - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMax, - cz_get_eclk_level(adev, - pi->vce_dpm.soft_max_clk, - PPSMC_MSG_SetEclkSoftMax)); - if (ret) - return ret; - } - - return ret; -} - -static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (!pi->max_vce_level) { - cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); - pi->max_vce_level = cz_get_argument(adev) + 1; - } - - if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) { - DRM_ERROR("Invalid max vce level!\n"); - return -EINVAL; - } - - return pi->max_vce_level; -} - -static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *dep_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - uint32_t level = 0; - int ret = 0; - - pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk; - level = cz_dpm_get_max_vce_level(adev) - 1; - if (level < dep_table->count) - pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk; - else - pi->vce_dpm.soft_max_clk = - dep_table->entries[dep_table->count - 1].ecclk; - - /* get min/max eclk soft value - * notify SMU to execute */ - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMin, - cz_get_eclk_level(adev, - pi->vce_dpm.soft_min_clk, - PPSMC_MSG_SetEclkSoftMin)); - if (ret) - return ret; - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkSoftMax, - 
cz_get_eclk_level(adev, - pi->vce_dpm.soft_max_clk, - PPSMC_MSG_SetEclkSoftMax)); - if (ret) - return ret; - - DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n", - pi->vce_dpm.soft_min_clk, - pi->vce_dpm.soft_max_clk); - - return 0; -} - -static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) -{ - int ret = 0; - - switch (level) { - case AMDGPU_DPM_FORCED_LEVEL_HIGH: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_force_highest(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_uvd_force_highest(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_vce_force_highest(adev); - if (ret) - return ret; - break; - case AMDGPU_DPM_FORCED_LEVEL_LOW: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_force_lowest(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_uvd_force_lowest(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - ret = cz_dpm_vce_force_lowest(adev); - if (ret) - return ret; - break; - case AMDGPU_DPM_FORCED_LEVEL_AUTO: - /* sclk */ - ret = cz_dpm_unforce_dpm_levels(adev); - if (ret) - return ret; - - /* uvd */ - ret = cz_dpm_unforce_uvd_dpm_levels(adev); - if (ret) - return ret; - - /* vce */ - ret = cz_dpm_unforce_vce_dpm_levels(adev); - if (ret) - return ret; - break; - default: - break; - } - - adev->pm.dpm.forced_level = level; - - return ret; -} - -/* fix me, display configuration change lists here - * mostly dal related*/ -static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev) -{ -} - -static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps); - - if (low) - return requested_state->levels[0].sclk; - else - return requested_state->levels[requested_state->num_levels - 1].sclk; - -} - -static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - return pi->sys_info.bootup_uma_clk; -} - -static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (enable && pi->caps_uvd_dpm ) { - pi->dpm_flags |= DPMFlags_UVD_Enabled; - DRM_DEBUG("UVD DPM Enabled.\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK); - } else { - pi->dpm_flags &= ~DPMFlags_UVD_Enabled; - DRM_DEBUG("UVD DPM Stopped\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK); - } - - return ret; -} - -static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate) -{ - return cz_enable_uvd_dpm(adev, !gate); -} - - -static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret; - - if (pi->uvd_power_gated == gate) - return; - - pi->uvd_power_gated = gate; - - if (gate) { - if (pi->caps_uvd_pg) { - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); - return; - } - - /* shutdown the UVD block */ - ret = 
amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); - return; - } - } - cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) { - /* power off the UVD block */ - ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); - if (ret) { - DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); - return; - } - } - } else { - if (pi->caps_uvd_pg) { - /* power on the UVD block */ - if (pi->uvd_dynamic_pg) - ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); - else - ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n"); - return; - } - - /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n"); - return; - } - - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - if (ret) { - DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n"); - return; - } - } - cz_update_uvd_dpm(adev, gate); - } -} - -static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable) -{ - struct cz_power_info *pi = cz_get_pi(adev); - int ret = 0; - - if (enable && pi->caps_vce_dpm) { - pi->dpm_flags |= DPMFlags_VCE_Enabled; - DRM_DEBUG("VCE DPM Enabled.\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK); - - } else { - pi->dpm_flags &= ~DPMFlags_VCE_Enabled; - DRM_DEBUG("VCE DPM Stopped\n"); - - ret = cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK); - } - - return ret; -} - -static int cz_update_vce_dpm(struct amdgpu_device *adev) -{ - struct cz_power_info *pi = cz_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ - if (pi->caps_stable_power_state) { - pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; - } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */ - /* leave it as set by user */ - /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/ - } - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(adev, - pi->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - return 0; -} - -static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) -{ - struct cz_power_info *pi = cz_get_pi(adev); - - if (pi->caps_vce_pg) { - if (pi->vce_power_gated != gate) { - if (gate) { - /* disable clockgating so we can properly shut down the block */ - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - /* shutdown the VCE block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - - cz_enable_vce_dpm(adev, false); - cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); - pi->vce_power_gated = true; - } else { - cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); - pi->vce_power_gated = false; - - /* re-init the VCE block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - /* enable clockgating. 
hw will dynamically gate/ungate clocks on the fly */ - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - - cz_update_vce_dpm(adev); - cz_enable_vce_dpm(adev, true); - } - } else { - if (! pi->vce_power_gated) { - cz_update_vce_dpm(adev); - } - } - } else { /*pi->caps_vce_pg*/ - pi->vce_power_gated = gate; - cz_update_vce_dpm(adev); - cz_enable_vce_dpm(adev, !gate); - } -} - -static int cz_check_state_equal(struct amdgpu_device *adev, - struct amdgpu_ps *cps, - struct amdgpu_ps *rps, - bool *equal) -{ - if (equal == NULL) - return -EINVAL; - - *equal = false; - return 0; -} - -const struct amd_ip_funcs cz_dpm_ip_funcs = { - .name = "cz_dpm", - .early_init = cz_dpm_early_init, - .late_init = cz_dpm_late_init, - .sw_init = cz_dpm_sw_init, - .sw_fini = cz_dpm_sw_fini, - .hw_init = cz_dpm_hw_init, - .hw_fini = cz_dpm_hw_fini, - .suspend = cz_dpm_suspend, - .resume = cz_dpm_resume, - .is_idle = NULL, - .wait_for_idle = NULL, - .soft_reset = NULL, - .set_clockgating_state = cz_dpm_set_clockgating_state, - .set_powergating_state = cz_dpm_set_powergating_state, -}; - -static const struct amdgpu_dpm_funcs cz_dpm_funcs = { - .get_temperature = cz_dpm_get_temperature, - .pre_set_power_state = cz_dpm_pre_set_power_state, - .set_power_state = cz_dpm_set_power_state, - .post_set_power_state = cz_dpm_post_set_power_state, - .display_configuration_changed = cz_dpm_display_configuration_changed, - .get_sclk = cz_dpm_get_sclk, - .get_mclk = cz_dpm_get_mclk, - .print_power_state = cz_dpm_print_power_state, - .debugfs_print_current_performance_level = - cz_dpm_debugfs_print_current_performance_level, - .force_performance_level = cz_dpm_force_dpm_level, - .vblank_too_short = NULL, - .powergate_uvd = cz_dpm_powergate_uvd, - .powergate_vce = cz_dpm_powergate_vce, - .check_state_equal = cz_check_state_equal, -}; - -static void cz_dpm_set_funcs(struct amdgpu_device *adev) -{ - if (NULL == adev->pm.funcs) - adev->pm.funcs = &cz_dpm_funcs; -} - -const struct amdgpu_ip_block_version cz_dpm_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 8, - .minor = 0, - .rev = 0, - .funcs = &cz_dpm_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h deleted file mode 100644 index 5df8c1faab51..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __CZ_DPM_H__ -#define __CZ_DPM_H__ - -#include "smu8_fusion.h" - -#define CZ_AT_DFLT 30 -#define CZ_NUM_NBPSTATES 4 -#define CZ_NUM_NBPMEMORY_CLOCK 2 -#define CZ_MAX_HARDWARE_POWERLEVELS 8 -#define CZ_MAX_DISPLAY_CLOCK_LEVEL 8 -#define CZ_MAX_DISPLAYPHY_IDS 10 - -#define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 - -#define SMC_RAM_END 0x40000 - -#define DPMFlags_SCLK_Enabled 0x00000001 -#define DPMFlags_UVD_Enabled 0x00000002 -#define DPMFlags_VCE_Enabled 0x00000004 -#define DPMFlags_ACP_Enabled 0x00000008 -#define DPMFlags_ForceHighestValid 0x40000000 -#define DPMFlags_Debug 0x80000000 - -/* Do not change the following, it is also defined in SMU8.h */ -#define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 -#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 -#define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 -#define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 - -/* temporary solution to SetMinDeepSleepSclk - * should indicate by display adaptor - * 10k Hz unit*/ -#define CZ_MIN_DEEP_SLEEP_SCLK 800 - -enum cz_pt_config_reg_type { - CZ_CONFIGREG_MMR = 0, - CZ_CONFIGREG_SMC_IND, - CZ_CONFIGREG_DIDT_IND, - CZ_CONFIGREG_CACHE, - CZ_CONFIGREG_MAX -}; - -struct cz_pt_config_reg { - uint32_t offset; - uint32_t mask; - uint32_t shift; - uint32_t value; - enum cz_pt_config_reg_type type; -}; - -struct cz_dpm_entry { - uint32_t soft_min_clk; - uint32_t hard_min_clk; - uint32_t soft_max_clk; - uint32_t hard_max_clk; -}; - -struct cz_pl { - uint32_t sclk; - uint8_t vddc_index; - uint8_t ds_divider_index; - uint8_t ss_divider_index; - uint8_t allow_gnb_slow; - uint8_t force_nbp_state; - uint8_t display_wm; - uint8_t vce_wm; -}; - -struct cz_ps { - struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS]; - uint32_t num_levels; - bool need_dfs_bypass; - uint8_t dpm0_pg_nb_ps_lo; - uint8_t dpm0_pg_nb_ps_hi; - uint8_t dpmx_nb_ps_lo; - uint8_t dpmx_nb_ps_hi; - bool force_high; -}; - -struct cz_displayphy_entry { - uint8_t phy_present; - uint8_t active_lane_mapping; - uint8_t display_conf_type; - uint8_t num_active_lanes; -}; - -struct cz_displayphy_info { - bool phy_access_initialized; - struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS]; -}; - -struct cz_sys_info { - uint32_t bootup_uma_clk; - uint32_t bootup_sclk; - uint32_t dentist_vco_freq; - uint32_t nb_dpm_enable; - uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK]; - uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; - uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES]; - uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL]; - uint16_t bootup_nb_voltage_index; - uint8_t htc_tmp_lmt; - uint8_t htc_hyst_lmt; - uint32_t uma_channel_number; -}; - -struct cz_power_info { - uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS]; - struct cz_sys_info sys_info; - struct cz_pl boot_pl; - bool disable_nb_ps3_in_battery; - bool battery_state; - uint32_t lowest_valid; - uint32_t highest_valid; - uint16_t high_voltage_threshold; - /* smc offsets */ - uint32_t sram_end; - uint32_t dpm_table_start; - uint32_t soft_regs_start; - /* dpm SMU tables */ - uint8_t uvd_level_count; - uint8_t vce_level_count; - uint8_t acp_level_count; - uint32_t fps_high_threshold; - uint32_t fps_low_threshold; - /* dpm table */ - uint32_t dpm_flags; - struct cz_dpm_entry sclk_dpm; - struct cz_dpm_entry uvd_dpm; - struct cz_dpm_entry vce_dpm; - struct cz_dpm_entry acp_dpm; - - uint8_t uvd_boot_level; - uint8_t uvd_interval; - uint8_t vce_boot_level; - uint8_t vce_interval; - uint8_t acp_boot_level; - uint8_t acp_interval; - - uint8_t graphics_boot_level; - uint8_t 
graphics_interval; - uint8_t graphics_therm_throttle_enable; - uint8_t graphics_voltage_change_enable; - uint8_t graphics_clk_slow_enable; - uint8_t graphics_clk_slow_divider; - - uint32_t low_sclk_interrupt_threshold; - bool uvd_power_gated; - bool vce_power_gated; - bool acp_power_gated; - - uint32_t active_process_mask; - - uint32_t mgcg_cgtt_local0; - uint32_t mgcg_cgtt_local1; - uint32_t clock_slow_down_step; - uint32_t skip_clock_slow_down; - bool enable_nb_ps_policy; - uint32_t voting_clients; - uint32_t voltage_drop_threshold; - uint32_t gfx_pg_threshold; - uint32_t max_sclk_level; - uint32_t max_uvd_level; - uint32_t max_vce_level; - /* flags */ - bool didt_enabled; - bool video_start; - bool cac_enabled; - bool bapm_enabled; - bool nb_dpm_enabled_by_driver; - bool nb_dpm_enabled; - bool auto_thermal_throttling_enabled; - bool dpm_enabled; - bool need_pptable_upload; - /* caps */ - bool caps_cac; - bool caps_power_containment; - bool caps_sq_ramping; - bool caps_db_ramping; - bool caps_td_ramping; - bool caps_tcp_ramping; - bool caps_sclk_throttle_low_notification; - bool caps_fps; - bool caps_uvd_dpm; - bool caps_uvd_pg; - bool caps_vce_dpm; - bool caps_vce_pg; - bool caps_acp_dpm; - bool caps_acp_pg; - bool caps_stable_power_state; - bool caps_enable_dfs_bypass; - bool caps_sclk_ds; - bool caps_voltage_island; - /* power state */ - struct amdgpu_ps current_rps; - struct cz_ps current_ps; - struct amdgpu_ps requested_rps; - struct cz_ps requested_ps; - - bool uvd_power_down; - bool vce_power_down; - bool acp_power_down; - - bool uvd_dynamic_pg; -}; - -/* cz_smc.c */ -uint32_t cz_get_argument(struct amdgpu_device *adev); -int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg); -int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - uint16_t msg, uint32_t parameter); -int cz_read_smc_sram_dword(struct amdgpu_device *adev, - uint32_t smc_address, uint32_t *value, uint32_t limit); -int cz_smu_upload_pptable(struct amdgpu_device *adev); -int cz_smu_download_pptable(struct amdgpu_device *adev, void **table); -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c deleted file mode 100644 index aed7033c0973..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c +++ /dev/null @@ -1,995 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include <linux/firmware.h> -#include "drmP.h" -#include "amdgpu.h" -#include "smu8.h" -#include "smu8_fusion.h" -#include "cz_ppsmc.h" -#include "cz_smumgr.h" -#include "smu_ucode_xfer_cz.h" -#include "amdgpu_ucode.h" -#include "cz_dpm.h" -#include "vi_dpm.h" - -#include "smu/smu_8_0_d.h" -#include "smu/smu_8_0_sh_mask.h" -#include "gca/gfx_8_0_d.h" -#include "gca/gfx_8_0_sh_mask.h" - -uint32_t cz_get_argument(struct amdgpu_device *adev) -{ - return RREG32(mmSMU_MP1_SRBM2P_ARG_0); -} - -static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = - (struct cz_smu_private_data *)(adev->smu.priv); - - return priv; -} - -static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) -{ - int i; - u32 content = 0, tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), - SMU_MP1_SRBM2P_RESP_0, CONTENT); - if (content != tmp) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == adev->usec_timeout) - return -EINVAL; - - WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0); - WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg); - - return 0; -} - -int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) -{ - int i; - u32 content = 0, tmp = 0; - - if (cz_send_msg_to_smc_async(adev, msg)) - return -EINVAL; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), - SMU_MP1_SRBM2P_RESP_0, CONTENT); - if (content != tmp) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == adev->usec_timeout) - return -EINVAL; - - if (PPSMC_Result_OK != tmp) { - dev_err(adev->dev, "SMC Failed to send Message.\n"); - return -EINVAL; - } - - return 0; -} - -int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - u16 msg, u32 parameter) -{ - WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc(adev, msg); -} - -static int cz_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); - - return 0; -} - -int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - int ret; - - ret = cz_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - *value = RREG32(mmMP0PUB_IND_DATA_0); - - return 0; -} - -static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit) -{ - int ret; - - ret = cz_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - WREG32(mmMP0PUB_IND_DATA_0, value); - - return 0; -} - -static int cz_smu_request_load_fw(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - - cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4); - - /*prepare toc buffers*/ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DriverDramAddrHi, - priv->toc_buffer.mc_addr_high); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_DriverDramAddrLo, - priv->toc_buffer.mc_addr_low); - cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs); - - /*execute jobs*/ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_aram); - - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_power_profiling_index); - - 
cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_initialize_index); - - return 0; -} - -/* - * Check if the FW has been loaded; the SMU will not return if loading - * has not finished. - */ -static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev, - uint32_t fw_mask) -{ - int i; - uint32_t index = SMN_MP1_SRAM_START_ADDR + - SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - - WREG32(mmMP0PUB_IND_INDEX, index); - - for (i = 0; i < adev->usec_timeout; i++) { - if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask)) - break; - udelay(1); - } - - if (i >= adev->usec_timeout) { - dev_err(adev->dev, - "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x", - fw_mask, RREG32(mmMP0PUB_IND_DATA)); - return -EINVAL; - } - - return 0; -} - -/* - * interfaces for different ip blocks to check firmware loading status - * 0 for success, otherwise failed - */ -static int cz_smu_check_finished(struct amdgpu_device *adev, - enum AMDGPU_UCODE_ID id) -{ - switch (id) { - case AMDGPU_UCODE_ID_SDMA0: - if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_SDMA1: - if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_CE: - if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_PFP: - if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_ME: - if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_MEC1: - if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_CP_MEC2: - if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_RLC_G: - if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED) - return 0; - break; - case AMDGPU_UCODE_ID_MAXIMUM: - default: - break; - } - - return 1; -} - -static int cz_load_mec_firmware(struct amdgpu_device *adev) -{ - struct amdgpu_firmware_info *ucode = - &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; - uint32_t reg_data; - uint32_t tmp; - - if (ucode->fw == NULL) - return -EINVAL; - - /* Disable MEC parsing/prefetching */ - tmp = RREG32(mmCP_MEC_CNTL); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); - WREG32(mmCP_MEC_CNTL, tmp); - - tmp = RREG32(mmCP_CPC_IC_BASE_CNTL); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); - WREG32(mmCP_CPC_IC_BASE_CNTL, tmp); - - reg_data = lower_32_bits(ucode->mc_addr) & - REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); - WREG32(mmCP_CPC_IC_BASE_LO, reg_data); - - reg_data = upper_32_bits(ucode->mc_addr) & - REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); - WREG32(mmCP_CPC_IC_BASE_HI, reg_data); - - return 0; -} - -int cz_smu_start(struct amdgpu_device *adev) -{ - int ret = 0; - - uint32_t fw_to_check = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_JT1_MASK | - UCODE_ID_CP_MEC_JT2_MASK; - - if (adev->asic_type == CHIP_STONEY) - fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - - cz_smu_request_load_fw(adev); - ret = cz_smu_check_fw_load_finish(adev, fw_to_check); - if (ret) - return ret; - - /* 
manually load MEC firmware for CZ */ - if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) { - ret = cz_load_mec_firmware(adev); - if (ret) { - dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret); - return ret; - } - } - - /* setup fw load flag */ - adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED | - AMDGPU_SDMA1_UCODE_LOADED | - AMDGPU_CPCE_UCODE_LOADED | - AMDGPU_CPPFP_UCODE_LOADED | - AMDGPU_CPME_UCODE_LOADED | - AMDGPU_CPMEC1_UCODE_LOADED | - AMDGPU_CPMEC2_UCODE_LOADED | - AMDGPU_CPRLC_UCODE_LOADED; - - if (adev->asic_type == CHIP_STONEY) - adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED); - - return ret; -} - -static uint32_t cz_convert_fw_type(uint32_t fw_type) -{ - enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = AMDGPU_UCODE_ID_SDMA0; - break; - case UCODE_ID_SDMA1: - result = AMDGPU_UCODE_ID_SDMA1; - break; - case UCODE_ID_CP_CE: - result = AMDGPU_UCODE_ID_CP_CE; - break; - case UCODE_ID_CP_PFP: - result = AMDGPU_UCODE_ID_CP_PFP; - break; - case UCODE_ID_CP_ME: - result = AMDGPU_UCODE_ID_CP_ME; - break; - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = AMDGPU_UCODE_ID_CP_MEC1; - break; - case UCODE_ID_RLC_G: - result = AMDGPU_UCODE_ID_RLC_G; - break; - default: - DRM_ERROR("UCode type is out of range!"); - } - - return result; -} - -static uint8_t cz_smu_translate_firmware_enum_to_arg( - enum cz_scratch_entry firmware_enum) -{ - uint8_t ret = 0; - - switch (firmware_enum) { - case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0: - ret = UCODE_ID_SDMA0; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - ret = UCODE_ID_SDMA1; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: - ret = UCODE_ID_CP_CE; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: - ret = UCODE_ID_CP_PFP; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: - ret = UCODE_ID_CP_ME; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: - ret = UCODE_ID_CP_MEC_JT1; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - ret = UCODE_ID_CP_MEC_JT2; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: - ret = UCODE_ID_GMCON_RENG; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: - ret = UCODE_ID_RLC_G; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: - ret = UCODE_ID_RLC_SCRATCH; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: - ret = UCODE_ID_RLC_SRM_ARAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: - ret = UCODE_ID_RLC_SRM_DRAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: - ret = UCODE_ID_DMCU_ERAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: - ret = UCODE_ID_DMCU_IRAM; - break; - case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: - ret = TASK_ARG_INIT_MM_PWR_LOG; - break; - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: - case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: - case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: - case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: - case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: - ret = TASK_ARG_REG_MMIO; - break; - case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: - ret = TASK_ARG_INIT_CLK_TABLE; - break; - } - - return ret; -} - -static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - struct cz_buffer_entry *entry) -{ - uint64_t gpu_addr; - uint32_t data_size; - uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id); - struct amdgpu_firmware_info *ucode = 
&adev->firmware.ucode[id]; - const struct gfx_firmware_header_v1_0 *header; - - if (ucode->fw == NULL) - return -EINVAL; - - gpu_addr = ucode->mc_addr; - header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; - data_size = le32_to_cpu(header->header.ucode_size_bytes); - - if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) || - (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) { - gpu_addr += le32_to_cpu(header->jt_offset) << 2; - data_size = le32_to_cpu(header->jt_size) << 2; - } - - entry->mc_addr_low = lower_32_bits(gpu_addr); - entry->mc_addr_high = upper_32_bits(gpu_addr); - entry->data_size = data_size; - entry->firmware_ID = firmware_enum; - - return 0; -} - -static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev, - enum cz_scratch_entry scratch_type, - uint32_t size_in_byte, - struct cz_buffer_entry *entry) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) | - priv->smu_buffer.mc_addr_low; - mc_addr += size_in_byte; - - priv->smu_buffer_used_bytes += size_in_byte; - entry->data_size = size_in_byte; - entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes; - entry->mc_addr_low = lower_32_bits(mc_addr); - entry->mc_addr_high = upper_32_bits(mc_addr); - entry->firmware_ID = scratch_type; - - return 0; -} - -static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - bool is_last) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; - - task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count; - - for (i = 0; i < priv->driver_buffer_length; i++) - if (priv->driver_buffer[i].firmware_ID == firmware_enum) - break; - - if (i >= priv->driver_buffer_length) { - dev_err(adev->dev, "Invalid Firmware Type\n"); - return -EINVAL; - } - - task->addr.low = priv->driver_buffer[i].mc_addr_low; - task->addr.high = priv->driver_buffer[i].mc_addr_high; - task->size_bytes = priv->driver_buffer[i].data_size; - - return 0; -} - -static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev, - enum cz_scratch_entry firmware_enum, - uint8_t type, bool is_last) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; - - task->type = type; - task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); - task->next = is_last ? 
END_OF_TASK_LIST : priv->toc_entry_used_count; - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == firmware_enum) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Firmware Type\n"); - return -EINVAL; - } - - task->addr.low = priv->scratch_buffer[i].mc_addr_low; - task->addr.high = priv->scratch_buffer[i].mc_addr_high; - task->size_bytes = priv->scratch_buffer[i].data_size; - - if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) { - struct cz_ih_meta_data *pIHReg_restore = - (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr; - pIHReg_restore->command = - METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; - } - - return 0; -} - -static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - priv->toc_entry_aram = priv->toc_entry_used_count; - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - TASK_TYPE_UCODE_SAVE, true); - - return 0; -} - -static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count; - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - TASK_TYPE_UCODE_SAVE, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - TASK_TYPE_UCODE_SAVE, true); - - return 0; -} - -static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count; - - /* populate ucode */ - if (adev->firmware.smu_load) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); - } - - /* populate scratch */ - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - TASK_TYPE_UCODE_LOAD, true); - - return 0; -} - -static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_power_profiling_index = priv->toc_entry_used_count; - - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, - TASK_TYPE_INITIALIZE, true); - return 0; -} - -static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_initialize_index = 
priv->toc_entry_used_count; - - if (adev->firmware.smu_load) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (adev->asic_type == CHIP_STONEY) { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - } else { - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - } - cz_smu_populate_single_ucode_load_task(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); - } - - return 0; -} - -static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev) -{ - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - priv->toc_entry_clock_table = priv->toc_entry_used_count; - - cz_smu_populate_single_scratch_task(adev, - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, - TASK_TYPE_INITIALIZE, true); - - return 0; -} - -static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev) -{ - int i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; - - for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) - toc->JobList[i] = (uint8_t)IGNORE_JOB; - - return 0; -} - -/* - * cz smu uninitialization - */ -int cz_smu_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_unref(&adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - kfree(adev->smu.priv); - adev->smu.priv = NULL; - if (adev->firmware.smu_load) - amdgpu_ucode_fini_bo(adev); - - return 0; -} - -int cz_smu_download_pptable(struct amdgpu_device *adev, void **table) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Scratch Type\n"); - return -EINVAL; - } - - *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr; - - /* prepare buffer for pptable */ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrHi, - priv->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrLo, - priv->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_clock_table); - - /* actual downloading */ - cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram); - - return 0; -} - -int cz_smu_upload_pptable(struct amdgpu_device *adev) -{ - uint8_t i; - struct cz_smu_private_data *priv = cz_smu_get_priv(adev); - - for (i = 0; i < priv->scratch_buffer_length; i++) - if (priv->scratch_buffer[i].firmware_ID == - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) - break; - - if (i >= priv->scratch_buffer_length) { - dev_err(adev->dev, "Invalid Scratch Type\n"); - return -EINVAL; - } - - /* prepare SMU */ - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrHi, - priv->scratch_buffer[i].mc_addr_high); - 
cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetClkTableAddrLo, - priv->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ExecuteJob, - priv->toc_entry_clock_table); - - /* actual uploading */ - cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu); - - return 0; -} - -/* - * cz smumgr functions initialization - */ -static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = { - .check_fw_load_finish = cz_smu_check_finished, - .request_smu_load_fw = NULL, - .request_smu_specific_fw = NULL, -}; - -/* - * cz smu initialization - */ -int cz_smu_init(struct amdgpu_device *adev) -{ - int ret = -EINVAL; - uint64_t mc_addr = 0; - struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; - struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; - void *toc_buf_ptr = NULL; - void *smu_buf_ptr = NULL; - - struct cz_smu_private_data *priv = - kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL); - if (priv == NULL) - return -ENOMEM; - - /* allocate firmware buffers */ - if (adev->firmware.smu_load) - amdgpu_ucode_init_bo(adev); - - adev->smu.priv = priv; - adev->smu.fw_flags = 0; - priv->toc_buffer.data_size = 4096; - - priv->smu_buffer.data_size = - ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + - ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + - ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + - ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + - ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); - - /* prepare toc buffer and smu buffer: - * 1. create amdgpu_bo for toc buffer and smu buffer - * 2. pin mc address - * 3. map kernel virtual address - */ - ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - toc_buf); - - if (ret) { - dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); - return ret; - } - - ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, - true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - smu_buf); - - if (ret) { - dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); - return ret; - } - - /* toc buffer reserve/pin/map */ - ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.toc_buf); - dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret); - return ret; - } - - ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.toc_buf); - amdgpu_bo_unref(&adev->smu.toc_buf); - dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret); - return ret; - } - - ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); - if (ret) - goto smu_init_failed; - - amdgpu_bo_unreserve(adev->smu.toc_buf); - - priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr); - priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr); - priv->toc_buffer.kaddr = toc_buf_ptr; - - /* smu buffer reserve/pin/map */ - ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); - if (ret) { - amdgpu_bo_unref(&adev->smu.smu_buf); - dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret); - return ret; - } - - ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); - if (ret) { - amdgpu_bo_unreserve(adev->smu.smu_buf); - amdgpu_bo_unref(&adev->smu.smu_buf); - dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret); - return ret; - } - - ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); - if (ret) - goto smu_init_failed; - - amdgpu_bo_unreserve(adev->smu.smu_buf); - - priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr); - priv->smu_buffer.mc_addr_high = 
upper_32_bits(mc_addr); - priv->smu_buffer.kaddr = smu_buf_ptr; - - if (adev->firmware.smu_load) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - - if (adev->asic_type == CHIP_STONEY) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } else { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - if (adev->asic_type == CHIP_STONEY) { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } else { - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - if (cz_smu_populate_single_firmware_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, - &priv->driver_buffer[priv->driver_buffer_length++])) - goto smu_init_failed; - } - - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - UCODE_ID_RLC_SCRATCH_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, - sizeof(struct SMU8_MultimediaPowerLogData), - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - if (cz_smu_populate_single_scratch_entry(adev, - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, - sizeof(struct SMU8_Fusion_ClkTable), - &priv->scratch_buffer[priv->scratch_buffer_length++])) - goto smu_init_failed; - - cz_smu_initialize_toc_empty_job_list(adev); - cz_smu_construct_toc_for_rlc_aram_save(adev); - cz_smu_construct_toc_for_vddgfx_enter(adev); - cz_smu_construct_toc_for_vddgfx_exit(adev); - cz_smu_construct_toc_for_power_profiling(adev); - cz_smu_construct_toc_for_bootup(adev); - cz_smu_construct_toc_for_clock_table(adev); - /* init the smumgr functions */ - adev->smu.smumgr_funcs = &cz_smumgr_funcs; - - return 0; - -smu_init_failed: - amdgpu_bo_unref(toc_buf); - amdgpu_bo_unref(smu_buf); - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h deleted file mode 100644 
index 026342fcf0f3..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __CZ_SMC_H__ -#define __CZ_SMC_H__ - -#define MAX_NUM_FIRMWARE 8 -#define MAX_NUM_SCRATCH 11 -#define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 -#define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 -#define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 -#define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) - -enum cz_scratch_entry { - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, - CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, - CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, - CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, - CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, - CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, - CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, - CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, - CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, - CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, - CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, - CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, - CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, - CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, - CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE -}; - -struct cz_buffer_entry { - uint32_t data_size; - uint32_t mc_addr_low; - uint32_t mc_addr_high; - void *kaddr; - enum cz_scratch_entry firmware_ID; -}; - -struct cz_register_index_data_pair { - uint32_t offset; - uint32_t value; -}; - -struct cz_ih_meta_data { - uint32_t command; - struct cz_register_index_data_pair register_index_value_pair[1]; -}; - -struct cz_smu_private_data { - uint8_t driver_buffer_length; - uint8_t scratch_buffer_length; - uint16_t toc_entry_used_count; - uint16_t toc_entry_initialize_index; - uint16_t toc_entry_power_profiling_index; - uint16_t toc_entry_aram; - uint16_t toc_entry_ih_register_restore_task_index; - uint16_t toc_entry_clock_table; - uint16_t ih_register_restore_task_size; - uint16_t smu_buffer_used_bytes; - - struct cz_buffer_entry toc_buffer; - struct cz_buffer_entry smu_buffer; - struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; - struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 84afaae97e65..d4452d8f76ca 100644 
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v10_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v10_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v10_0_show_cursor(crtc); @@ -2620,7 +2617,6 @@ unpin: static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v10_0_lock_cursor(crtc, true); @@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v10_0_show_cursor(crtc); dce_v10_0_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7a7fa96d2e49..1cf1d9d1aec1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(stoney_golden_settings_a11)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, polaris11_golden_settings_a11, (const u32)ARRAY_SIZE(polaris11_golden_settings_a11)); @@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) num_crtc = 6; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: num_crtc = 5; break; default: @@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev) adev->mode_info.audio.num_pins = 8; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: adev->mode_info.audio.num_pins = 6; break; default: @@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) int pll; if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11)) { + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); struct amdgpu_encoder_atom_dig *dig = 
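The dce_v10_0 hunks above move the CUR_SIZE write into dce_v10_0_cursor_move_locked(), so the cursor size is reprogrammed on every move and by cursor_reset() after a GPU reset, and cursor_set2() now refreshes width, height and hotspot under a single condition. Condensed view (not a literal copy; the x/y recomputation from the old hotspot is elided):

	if (width  != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x  != amdgpu_crtc->cursor_hot_x ||
	    hot_y  != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		/* ... x/y recomputed from the cached position/hotspot ... */
		dce_v10_0_cursor_move_locked(crtc, x, y); /* also writes CUR_SIZE now */

		amdgpu_crtc->cursor_width  = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x  = hot_x;
		amdgpu_crtc->cursor_hot_y  = hot_y;
	}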
amdgpu_encoder->enc_priv; @@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v11_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v11_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v11_0_show_cursor(crtc); @@ -2636,7 +2637,6 @@ unpin: static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v11_0_lock_cursor(crtc, true); @@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v11_0_show_cursor(crtc); dce_v11_0_lock_cursor(crtc, false); @@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11)) { + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); int encoder_mode = @@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle) adev->mode_info.num_dig = 6; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; @@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle) amdgpu_atombios_crtc_powergate_init(adev); amdgpu_atombios_encoder_init_dig(adev); if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11)) { + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) { amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); amdgpu_atombios_crtc_set_dce_clock(adev, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 44f024c9b9aa..809aa94a0cc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, struct amdgpu_device *adev = 
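dce_v10_0 above, dce_v11_0 here, and dce_v6_0/dce_v8_0 below all program CUR_SIZE with the same packed encoding: width minus one in bits 31:16, height minus one in bits 15:0. Spelled out as a self-contained helper (the function name is illustrative):

	#include <stdint.h>

	/* CUR_SIZE packs (size - 1) into two 16-bit fields. */
	static inline uint32_t cur_size_pack(uint32_t width, uint32_t height)
	{
		return ((width - 1) << 16) | (height - 1);
	}

	/* Example: a 64x64 cursor gives cur_size_pack(64, 64) == 0x003f003f. */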
crtc->dev->dev_private; int xorigin = 0, yorigin = 0; + int w = amdgpu_crtc->cursor_width; + amdgpu_crtc->cursor_x = x; amdgpu_crtc->cursor_y = y; @@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v6_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v6_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v6_0_show_cursor(crtc); @@ -1986,7 +1985,6 @@ unpin: static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v6_0_lock_cursor(crtc, true); @@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v6_0_show_cursor(crtc); dce_v6_0_lock_cursor(crtc, false); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 30945fe55ac7..d2590d75aa11 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, + ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); return 0; } @@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, int32_t hot_y) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; struct drm_gem_object *obj; struct amdgpu_bo *aobj; int ret; @@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, dce_v8_0_lock_cursor(crtc, true); - if (hot_x != amdgpu_crtc->cursor_hot_x || + if (width != amdgpu_crtc->cursor_width || + height != amdgpu_crtc->cursor_height || + hot_x != amdgpu_crtc->cursor_hot_x || hot_y != amdgpu_crtc->cursor_hot_y) { int x, y; @@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, 
dce_v8_0_cursor_move_locked(crtc, x, y); - amdgpu_crtc->cursor_hot_x = hot_x; - amdgpu_crtc->cursor_hot_y = hot_y; - } - - if (width != amdgpu_crtc->cursor_width || - height != amdgpu_crtc->cursor_height) { - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (width - 1) << 16 | (height - 1)); amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_height = height; + amdgpu_crtc->cursor_hot_x = hot_x; + amdgpu_crtc->cursor_hot_y = hot_y; } dce_v8_0_show_cursor(crtc); @@ -2471,7 +2468,6 @@ unpin: static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_device *adev = crtc->dev->dev_private; if (amdgpu_crtc->cursor_bo) { dce_v8_0_lock_cursor(crtc, true); @@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, amdgpu_crtc->cursor_y); - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, - (amdgpu_crtc->cursor_width - 1) << 16 | - (amdgpu_crtc->cursor_height - 1)); - dce_v8_0_show_cursor(crtc); dce_v8_0_lock_cursor(crtc, false); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index b323f5ef64d2..c998f6aaaf36 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -25,7 +25,7 @@ #include "amdgpu_ih.h" #include "amdgpu_gfx.h" #include "amdgpu_ucode.h" -#include "si/clearstate_si.h" +#include "clearstate_si.h" #include "bif/bif_3_0_d.h" #include "bif/bif_3_0_sh_mask.h" #include "oss/oss_1_0_d.h" @@ -1794,14 +1794,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) @@ -1975,7 +1970,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err2; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index c4e14015ec5b..e3589b55a1e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2003,14 +2003,9 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) */ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } /** @@ -2321,7 +2316,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err2; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d0ec00986f38..35f9cd83b821 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin"); 
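The gfx_v6_0 and gfx_v7_0 scratch_init() hunks above (and the identical gfx_v8_0 hunk just below) replace the per-register free[]/reg[] arrays with a single free_mask whose low num_reg bits start set. Allocation then reduces to a find-first-set on the mask; a sketch of the matching get/free pair, assuming the shared scratch helpers (which live outside this diff) were converted to the same representation:

	/* Sketch under the stated assumption about the common helpers. */
	int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, u32 *reg)
	{
		int i = ffs(adev->gfx.scratch.free_mask);

		if (i != 0 && i <= adev->gfx.scratch.num_reg) {
			i--;
			adev->gfx.scratch.free_mask &= ~(1u << i);
			*reg = adev->gfx.scratch.reg_base + i;
			return 0;
		}
		return -EINVAL;
	}

	void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, u32 reg)
	{
		adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
	}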
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin"); + static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, @@ -650,6 +657,8 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev); static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev); static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev); +static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t addr); +static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t addr); static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) { @@ -689,6 +698,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(tonga_golden_common_all)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -741,14 +751,9 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) @@ -821,7 +826,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err2; @@ -903,6 +908,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = "polaris12"; + break; case CHIP_STONEY: chip_name = "stoney"; break; @@ -930,6 +938,13 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + + /* chain ib ucode isn't formal released, just disable it by far + * TODO: when ucod ready we should use ucode version to judge if + * chain-ib support or not. 
+ */ + adev->virt.chained_ib_support = false; + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); @@ -1356,6 +1371,51 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) } } +static int gfx_v8_0_kiq_init_ring(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq) +{ + int r = 0; + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs); + if (r) + return r; + } + + ring->adev = NULL; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_KIQ; + if (adev->gfx.mec2_fw) { + ring->me = 2; + ring->pipe = 0; + } else { + ring->me = 1; + ring->pipe = 1; + } + + irq->data = ring; + ring->queue = 0; + sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue); + r = amdgpu_ring_init(adev, ring, 1024, + irq, AMDGPU_CP_KIQ_IRQ_DRIVER0); + if (r) + dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); + + return r; +} + +static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq) +{ + if (amdgpu_sriov_vf(ring->adev)) + amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + + amdgpu_ring_fini(ring); + irq->data = NULL; +} + #define MEC_HPD_SIZE 2048 static int gfx_v8_0_mec_init(struct amdgpu_device *adev) @@ -1410,6 +1470,35 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) return 0; } +static void gfx_v8_0_kiq_fini(struct amdgpu_device *adev) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); + kiq->eop_obj = NULL; +} + +static int gfx_v8_0_kiq_init(struct amdgpu_device *adev) +{ + int r; + u32 *hpd; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, + &kiq->eop_gpu_addr, (void **)&hpd); + if (r) { + dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r); + return r; + } + + memset(hpd, 0, MEC_HPD_SIZE); + + amdgpu_bo_kunmap(kiq->eop_obj); + + return 0; +} + static const u32 vgpr_init_compute_shader[] = { 0x7e000209, 0x7e020208, @@ -1691,7 +1780,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); /* shedule the ib on the ring */ - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) { DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); goto fail; @@ -1768,6 +1857,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: ret = amdgpu_atombios_get_gfx_info(adev); if (ret) return ret; @@ -1985,8 +2075,14 @@ static int gfx_v8_0_sw_init(void *handle) { int i, r; struct amdgpu_ring *ring; + struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* KIQ event */ + r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq); + if (r) + return r; + /* EOP Event */ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); if (r) @@ -2024,6 +2120,17 @@ static int gfx_v8_0_sw_init(void *handle) return r; } + r = gfx_v8_0_kiq_init(adev); + if (r) { + DRM_ERROR("Failed to init KIQ BOs!\n"); + return r; + } + + kiq = &adev->gfx.kiq; + r = gfx_v8_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + if (r) + return r; + /* set up the gfx ring */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -2107,7 +2214,9 @@ 
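gfx_v8_0_kiq_init() above is a textbook use of the amdgpu kernel-BO lifecycle: amdgpu_bo_create_kernel() returns the object, its GPU address and a CPU mapping in one call; the buffer is initialized through the mapping and unmapped with amdgpu_bo_kunmap(); gfx_v8_0_kiq_fini() later tears everything down with amdgpu_bo_free_kernel(). Reduced to its steps (error unwinding beyond the create call is elided):

	u32 *hpd;
	int r;

	r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r)
		return r;			/* nothing to unwind yet */

	memset(hpd, 0, MEC_HPD_SIZE);		/* init through the CPU map */
	amdgpu_bo_kunmap(kiq->eop_obj);		/* GPU keeps using eop_gpu_addr */

	/* ... in the fini path, one call releases map, address and object: */
	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);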
static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + gfx_v8_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + gfx_v8_0_kiq_fini(adev); gfx_v8_0_mec_fini(adev); gfx_v8_0_rlc_fini(adev); gfx_v8_0_free_microcode(adev); @@ -2682,6 +2791,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) break; case CHIP_POLARIS11: + case CHIP_POLARIS12: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P4_16x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | @@ -3503,6 +3613,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) *rconf1 |= 0x0; break; case CHIP_POLARIS11: + case CHIP_POLARIS12: *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | SE_XSEL(1) | SE_YSEL(1); *rconf1 |= 0x0; @@ -4010,18 +4121,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); gfx_v8_0_init_power_gating(adev); WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); - if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { - cz_enable_sck_slow_down_on_power_up(adev, true); - cz_enable_sck_slow_down_on_power_down(adev, true); - } else { - cz_enable_sck_slow_down_on_power_up(adev, false); - cz_enable_sck_slow_down_on_power_down(adev, false); - } - if (adev->pg_flags & AMD_PG_SUPPORT_CP) - cz_enable_cp_power_gating(adev, true); - else - cz_enable_cp_power_gating(adev, false); - } else if (adev->asic_type == CHIP_POLARIS11) { + } else if ((adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) { gfx_v8_0_init_csb(adev); gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); @@ -4095,7 +4196,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) { + adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS12) { tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); tmp &= ~0x3; WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); @@ -4283,6 +4385,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x0000002A); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_ring_write(ring, 0x16000012); amdgpu_ring_write(ring, 0x00000000); break; @@ -4489,6 +4592,393 @@ static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev) } } +/* KIQ functions */ +static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) +{ + uint32_t tmp; + struct amdgpu_device *adev = ring->adev; + + /* tell RLC which is KIQ queue */ + tmp = RREG32(mmRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32(mmRLC_CP_SCHEDULERS, tmp); + tmp |= 0x80; + WREG32(mmRLC_CP_SCHEDULERS, tmp); +} + +static void gfx_v8_0_kiq_enable(struct amdgpu_ring *ring) +{ + amdgpu_ring_alloc(ring, 8); + /* set resources */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */ + amdgpu_ring_write(ring, 0x000000FF); /* queue mask lo */ + amdgpu_ring_write(ring, 0); /* queue mask hi */ + amdgpu_ring_write(ring, 0); /* gws mask lo */ + amdgpu_ring_write(ring, 0); /* gws mask hi */ + amdgpu_ring_write(ring, 0); /* oac mask */ + amdgpu_ring_write(ring, 0); /* gds heap base:0, gds heap size:0 */ + amdgpu_ring_commit(ring); + udelay(50); +} + +static void 
gfx_v8_0_map_queue_enable(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint64_t mqd_addr, wptr_addr; + + mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + amdgpu_ring_alloc(kiq_ring, 8); + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, 0x21010000); + amdgpu_ring_write(kiq_ring, (ring->doorbell_index << 2) | + (ring->queue << 26) | + (ring->pipe << 29) | + ((ring->me == 1 ? 0 : 1) << 31)); /* doorbell */ + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); + amdgpu_ring_commit(kiq_ring); + udelay(50); +} + +static int gfx_v8_0_mqd_init(struct amdgpu_device *adev, + struct vi_mqd *mqd, + uint64_t mqd_gpu_addr, + uint64_t eop_gpu_addr, + struct amdgpu_ring *ring) +{ + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; + uint32_t tmp; + + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000003; + + eop_base_addr = eop_gpu_addr >> 8; + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32(mmCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(MEC_HPD_SIZE / 4) - 1)); + + mqd->cp_hqd_eop_control = tmp; + + /* enable doorbell? 
*/ + tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + + if (ring->use_doorbell) + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + else + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* disable the queue if it's active */ + mqd->cp_hqd_dequeue_request = 0; + mqd->cp_hqd_pq_rptr = 0; + mqd->cp_hqd_pq_wptr = 0; + + /* set the pointer to the MQD */ + mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc; + mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); + + /* set MQD vmid to 0 */ + tmp = RREG32(mmCP_MQD_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); + mqd->cp_mqd_control = tmp; + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + hqd_gpu_addr = ring->gpu_addr >> 8; + mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; + mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + tmp = RREG32(mmCP_HQD_PQ_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, + (order_base_2(ring->ring_size / 4) - 1)); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, + ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); +#endif + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + mqd->cp_hqd_pq_control = tmp; + + /* set the wb address whether it's enabled or not */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_rptr_report_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + + tmp = 0; + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + ring->wptr = 0; + mqd->cp_hqd_pq_wptr = ring->wptr; + mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); + + /* set the vmid for the queue */ + mqd->cp_hqd_vmid = 0; + + tmp = RREG32(mmCP_HQD_PERSISTENT_STATE); + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); + mqd->cp_hqd_persistent_state = tmp; + + /* activate the queue */ + mqd->cp_hqd_active = 1; + + return 0; +} + +static int gfx_v8_0_kiq_init_register(struct amdgpu_device *adev, + struct vi_mqd *mqd, + struct amdgpu_ring *ring) +{ + uint32_t tmp; + int j; + + /* disable wptr polling */ + tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); + WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); + + WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); + WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); + + /* set 
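Both CP_HQD_EOP_CONTROL and CP_HQD_PQ_CONTROL in gfx_v8_0_mqd_init() above encode ring sizes as a power-of-two exponent: the hardware interprets the field as 2^(value+1) dwords, hence the order_base_2(bytes / 4) - 1 expressions. Worked through for the 2048-byte EOP buffer, every number following from the diff:

	/* MEC_HPD_SIZE        = 2048 bytes = 512 dwords
	 * order_base_2(512)   = 9
	 * EOP_SIZE field      = 9 - 1 = 8
	 * hardware ring size  = 2^(8+1) = 512 dwords = 2048 bytes -- matches */
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MEC_HPD_SIZE / 4) - 1));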
the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + WREG32(mmCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control); + + /* enable doorbell? */ + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); + + /* disable the queue if it's active */ + if (RREG32(mmCP_HQD_ACTIVE) & 1) { + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < adev->usec_timeout; j++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request); + WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr); + WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); + } + + /* set the pointer to the MQD */ + WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); + WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); + + /* set MQD vmid to 0 */ + WREG32(mmCP_MQD_CONTROL, mqd->cp_mqd_control); + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); + WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + WREG32(mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); + + /* set the wb address whether it's enabled or not */ + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->cp_hqd_pq_rptr_report_addr_lo); + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->cp_hqd_pq_rptr_report_addr_hi); + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo); + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi); + + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_FIJI) || + (adev->asic_type == CHIP_STONEY)) { + WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, + AMDGPU_DOORBELL_KIQ << 2); + WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, + AMDGPU_DOORBELL_MEC_RING7 << 2); + } + } + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); + + /* set the vmid for the queue */ + WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid); + + WREG32(mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); + + /* activate the queue */ + WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); + + if (ring->use_doorbell) { + tmp = RREG32(mmCP_PQ_STATUS); + tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + WREG32(mmCP_PQ_STATUS, tmp); + } + + return 0; +} + +static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring, + struct vi_mqd *mqd, + u64 mqd_gpu_addr) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + uint64_t eop_gpu_addr; + bool is_kiq = false; + + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + is_kiq = true; + + if (is_kiq) { + eop_gpu_addr = kiq->eop_gpu_addr; + gfx_v8_0_kiq_setting(&kiq->ring); + } else + eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + ring->queue * MEC_HPD_SIZE; + + mutex_lock(&adev->srbm_mutex); + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + + gfx_v8_0_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr, ring); + + if (is_kiq) + gfx_v8_0_kiq_init_register(adev, mqd, ring); + + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + if (is_kiq) + gfx_v8_0_kiq_enable(ring); + else + gfx_v8_0_map_queue_enable(&kiq->ring, ring); + + return 0; +} + +static void gfx_v8_0_kiq_free_queue(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int i; + + for (i = 0; i < 
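The dequeue sequence in gfx_v8_0_kiq_init_register() above uses amdgpu's standard bounded poll: write the dequeue request, then spin on CP_HQD_ACTIVE with udelay(1) for at most adev->usec_timeout iterations. Extracted into a wrapper for clarity; the wrapper is illustrative, and the in-tree code simply falls through on timeout rather than returning an error:

	static int gfx_v8_0_wait_hqd_idle(struct amdgpu_device *adev)
	{
		int j;

		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
				return 0;	/* queue drained */
			udelay(1);
		}
		return -ETIMEDOUT;
	}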
adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); + ring->mqd_obj = NULL; + } + + ring = &adev->gfx.kiq.ring; + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); + ring->mqd_obj = NULL; +} + +static int gfx_v8_0_kiq_setup_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + struct vi_mqd *mqd; + u64 mqd_gpu_addr; + u32 *buf; + int r = 0; + + r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &mqd_gpu_addr, (void **)&buf); + if (r) { + dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); + return r; + } + + /* init the mqd struct */ + memset(buf, 0, sizeof(struct vi_mqd)); + mqd = (struct vi_mqd *)buf; + + r = gfx_v8_0_kiq_init_queue(ring, mqd, mqd_gpu_addr); + if (r) + return r; + + amdgpu_bo_kunmap(ring->mqd_obj); + + return 0; +} + +static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r, i; + + ring = &adev->gfx.kiq.ring; + r = gfx_v8_0_kiq_setup_queue(adev, ring); + if (r) + return r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + r = gfx_v8_0_kiq_setup_queue(adev, ring); + if (r) + return r; + } + + gfx_v8_0_cp_compute_enable(adev, true); + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + } + + ring = &adev->gfx.kiq.ring; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + + return 0; +} + static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) { int r, i, j; @@ -4664,7 +5154,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) (adev->asic_type == CHIP_FIJI) || (adev->asic_type == CHIP_STONEY) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS10)) { + (adev->asic_type == CHIP_POLARIS10) || + (adev->asic_type == CHIP_POLARIS12)) { WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, @@ -4700,7 +5191,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) mqd->cp_hqd_persistent_state = tmp; if (adev->asic_type == CHIP_STONEY || adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) { + adev->asic_type == CHIP_POLARIS10 || + adev->asic_type == CHIP_POLARIS12) { tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1); WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp); @@ -4787,7 +5279,10 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (r) return r; - r = gfx_v8_0_cp_compute_resume(adev); + if (amdgpu_sriov_vf(adev)) + r = gfx_v8_0_kiq_resume(adev); + else + r = gfx_v8_0_cp_compute_resume(adev); if (r) return r; @@ -4826,6 +5321,7 @@ static int gfx_v8_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if (amdgpu_sriov_vf(adev)) { + gfx_v8_0_kiq_free_queue(adev); pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } @@ -5279,7 +5775,8 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - if (adev->asic_type == CHIP_POLARIS11) + if ((adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_SMC, @@ -5340,6 +5837,18 @@ static int gfx_v8_0_set_powergating_state(void *handle, case CHIP_CARRIZO: case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + cz_update_gfx_cg_power_gating(adev, enable); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) @@ -5353,6 +5862,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); else @@ -5375,6 +5885,45 @@ static int gfx_v8_0_set_powergating_state(void *handle, return 0; } +static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_GFX_MGCG */ + data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_MGCG; + + /* AMD_CG_SUPPORT_GFX_CGLG */ + data = RREG32(mmRLC_CGCG_CGLS_CTRL); + if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGCG; + + /* AMD_CG_SUPPORT_GFX_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGLS; + + /* AMD_CG_SUPPORT_GFX_CGTS */ + data = RREG32(mmCGTS_SM_CTRL_REG); + if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_CGTS; + + /* AMD_CG_SUPPORT_GFX_CGTS_LS */ + if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_CGTS_LS; + + /* AMD_CG_SUPPORT_GFX_RLC_LS */ + data = RREG32(mmRLC_MEM_SLP_CNTL); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; + + /* AMD_CG_SUPPORT_GFX_CP_LS */ + data = RREG32(mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; +} + static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, uint32_t reg_addr, uint32_t cmd) { @@ -5423,68 +5972,6 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e -static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) -{ - u32 data = 0; - unsigned i; - - data = RREG32(mmRLC_CNTL); - if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) - return; - - if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || - (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG))) { - data |= RLC_GPR_REG2__REQ_MASK; - data &= ~RLC_GPR_REG2__MESSAGE_MASK; - data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); - WREG32(mmRLC_GPR_REG2, data); - - for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPM_STAT) & - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) - break; - udelay(1); - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) - break; - udelay(1); - } - 
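gfx_v8_0_get_clockgating_state(), added above, inverts set_clockgating_state(): rather than reporting cached flags it reads the live RLC/CGTS/CP registers back into an AMD_CG_SUPPORT_* bitmask. A hypothetical consumer; only the callback's signature and flag names come from the diff, the dump itself is illustrative:

	/* Hypothetical debug dump built on the new callback. */
	u32 flags = 0;

	gfx_v8_0_get_clockgating_state((void *)adev, &flags);
	if (flags & AMD_CG_SUPPORT_GFX_MGCG)
		dev_info(adev->dev, "GFX medium grain clock gating: on\n");
	if (flags & AMD_CG_SUPPORT_GFX_CGCG)
		dev_info(adev->dev, "GFX coarse grain clock gating: on\n");
	if (flags & AMD_CG_SUPPORT_GFX_CGLS)
		dev_info(adev->dev, "GFX coarse grain light sleep: on\n");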
adev->gfx.rlc.in_safe_mode = true; - } -} - -static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) -{ - u32 data; - unsigned i; - - data = RREG32(mmRLC_CNTL); - if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) - return; - - if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || - (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG))) { - data |= RLC_GPR_REG2__REQ_MASK; - data &= ~RLC_GPR_REG2__MESSAGE_MASK; - data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); - WREG32(mmRLC_GPR_REG2, data); - adev->gfx.rlc.in_safe_mode = false; - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) - break; - udelay(1); - } -} - static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 data; @@ -5544,31 +6031,11 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev) -{ - adev->gfx.rlc.in_safe_mode = true; -} - -static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev) -{ - adev->gfx.rlc.in_safe_mode = false; -} - -static const struct amdgpu_rlc_funcs cz_rlc_funcs = { - .enter_safe_mode = cz_enter_rlc_safe_mode, - .exit_safe_mode = cz_exit_rlc_safe_mode -}; - static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .enter_safe_mode = iceland_enter_rlc_safe_mode, .exit_safe_mode = iceland_exit_rlc_safe_mode }; -static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = { - .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode -}; - static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -5990,7 +6457,8 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { u32 ref_and_mask, reg_mem_engine; - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) || + (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) { switch (ring->me) { case 1: ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; @@ -6203,6 +6671,31 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, upper_32_bits(seq)); } +static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned int flags) +{ + /* we only allocate 32bit for each seq wb address */ + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + + /* write fence seq to the "addr" */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* set register to trigger INT */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); + amdgpu_ring_write(ring, mmCPC_INT_STATUS); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ + } +} + static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); @@ -6213,6 +6706,10 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) { uint32_t dw2 = 0; + if (amdgpu_sriov_vf(ring->adev)) + gfx_v8_0_ring_emit_ce_meta_init(ring, + (flags & AMDGPU_VM_DOMAIN) ? 
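With the Carrizo/Stoney and nop variants deleted above, gfx_v8_0_set_rlc_funcs() (further down in this diff) wires every VI part to iceland_rlc_funcs. Callers treat the pair as a bracket around RLC-owned register updates; schematically, with the body as a placeholder:

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	/* ... modify RLC-owned clockgating registers here ... */

	adev->gfx.rlc.funcs->exit_safe_mode(adev);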
AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr); + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ if (flags & AMDGPU_HAVE_CTX_SWITCH) { gfx_v8_0_ring_emit_vgt_flush(ring); @@ -6237,6 +6734,36 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); amdgpu_ring_write(ring, dw2); amdgpu_ring_write(ring, 0); + + if (amdgpu_sriov_vf(ring->adev)) + gfx_v8_0_ring_emit_de_meta_init(ring, + (flags & AMDGPU_VM_DOMAIN) ? AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr); +} + +static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 0 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + adev->virt.reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + adev->virt.reg_val_offs * 4)); +} + +static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); } static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, @@ -6384,6 +6911,72 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + uint32_t tmp, target; + struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data; + + BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)); + + if (ring->me == 1) + target = mmCP_ME1_PIPE0_INT_CNTL; + else + target = mmCP_ME2_PIPE0_INT_CNTL; + target += ring->pipe; + + switch (type) { + case AMDGPU_CP_KIQ_IRQ_DRIVER0: + if (state == AMDGPU_IRQ_STATE_DISABLE) { + tmp = RREG32(mmCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32(mmCPC_INT_CNTL, tmp); + + tmp = RREG32(target); + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32(target, tmp); + } else { + tmp = RREG32(mmCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32(mmCPC_INT_CNTL, tmp); + + tmp = RREG32(target); + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32(target, tmp); + } + break; + default: + BUG(); /* kiq only support GENERIC2_INT now */ + break; + } + return 0; +} + +static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data; + + BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)); + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", + me_id, pipe_id, queue_id); + + amdgpu_fence_process(ring); + return 0; +} + static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6402,6 +6995,7 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .post_soft_reset 
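gfx_v8_0_ring_emit_rreg(), added above, turns a register read into a PM4 COPY_DATA whose destination is the writeback slot reserved in gfx_v8_0_kiq_init_ring(). A sketch of how an SR-IOV read path could consume it; the wait for packet completion is deliberately elided, since the fencing scheme is outside this diff:

	/* Sketch only: emit the read on the KIQ ring, then fetch the value
	 * from the writeback page once the packet has executed. */
	amdgpu_ring_alloc(ring, 32);
	gfx_v8_0_ring_emit_rreg(ring, reg);
	amdgpu_ring_commit(ring);

	/* ... wait for the ring to consume the packet (elided) ... */

	val = adev->wb.wb[adev->virt.reg_val_offs];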
= gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, + .get_clockgating_state = gfx_v8_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { @@ -6419,7 +7013,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ 2 + /* gfx_v8_ring_emit_sb */ - 3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */ + 3 + 4 + 29, /* gfx_v8_ring_emit_cntxcntl including vgt flush/meta-data */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, @@ -6464,10 +7058,39 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, }; +static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { + .type = AMDGPU_RING_TYPE_KIQ, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .get_rptr = gfx_v8_0_ring_get_rptr, + .get_wptr = gfx_v8_0_ring_get_wptr_compute, + .set_wptr = gfx_v8_0_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v8_0_ring_emit_gds_switch */ + 7 + /* gfx_v8_0_ring_emit_hdp_flush */ + 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ + 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ + 17 + /* gfx_v8_0_ring_emit_vm_flush */ + 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ + .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ + .emit_ib = gfx_v8_0_ring_emit_ib_compute, + .emit_fence = gfx_v8_0_ring_emit_fence_kiq, + .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate, + .test_ring = gfx_v8_0_ring_test_ring, + .test_ib = gfx_v8_0_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_rreg = gfx_v8_0_ring_emit_rreg, + .emit_wreg = gfx_v8_0_ring_emit_wreg, +}; + static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) { int i; + adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq; + for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; @@ -6490,6 +7113,11 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = { .process = gfx_v8_0_priv_inst_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = { + .set = gfx_v8_0_kiq_set_interrupt_state, + .process = gfx_v8_0_kiq_irq, +}; + static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) { adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; @@ -6500,22 +7128,14 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; + + adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; + adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs; } static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_TOPAZ: - adev->gfx.rlc.funcs = &iceland_rlc_funcs; - break; - case CHIP_STONEY: - case CHIP_CARRIZO: - adev->gfx.rlc.funcs = &cz_rlc_funcs; - break; - default: - adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs; - break; - } + adev->gfx.rlc.funcs = &iceland_rlc_funcs; } static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) @@ -6632,3 +7252,62 @@ const struct amdgpu_ip_block_version gfx_v8_1_ip_block = .rev = 0, .funcs = &gfx_v8_0_ip_funcs, }; + +static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, 
uint64_t csa_addr) +{ + uint64_t ce_payload_addr; + int cnt_ce; + static union { + struct amdgpu_ce_ib_state regular; + struct amdgpu_ce_ib_state_chained_ib chained; + } ce_payload = {0}; + + if (ring->adev->virt.chained_ib_support) { + ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); + cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2; + } else { + ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, ce_payload); + cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | + WRITE_DATA_DST_SEL(8) | + WR_CONFIRM) | + WRITE_DATA_CACHE_POLICY(0)); + amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr)); + amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr)); + amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2); +} + +static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t csa_addr) +{ + uint64_t de_payload_addr, gds_addr; + int cnt_de; + static union { + struct amdgpu_de_ib_state regular; + struct amdgpu_de_ib_state_chained_ib chained; + } de_payload = {0}; + + gds_addr = csa_addr + 4096; + if (ring->adev->virt.chained_ib_support) { + de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr); + de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, de_payload); + cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2; + } else { + de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr); + de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, de_payload); + cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(8) | + WR_CONFIRM) | + WRITE_DATA_CACHE_POLICY(0)); + amdgpu_ring_write(ring, lower_32_bits(de_payload_addr)); + amdgpu_ring_write(ring, upper_32_bits(de_payload_addr)); + amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2); +} diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 45a573e63d4a..e2b0b1646f99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); MODULE_FIRMWARE("radeon/verde_mc.bin"); MODULE_FIRMWARE("radeon/oland_mc.bin"); +MODULE_FIRMWARE("radeon/si58_mc.bin"); #define MC_SEQ_MISC0__MT__MASK 0xf0000000 #define MC_SEQ_MISC0__MT__GDDR1 0x10000000 @@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) const char *chip_name; char fw_name[30]; int err; + bool is_58_fw = false; DRM_DEBUG("\n"); @@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev) default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + /* this memory configuration requires special firmware */ + if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + is_58_fw = true; + + if (is_58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); err = request_firmware(&adev->mc.fw, fw_name, adev->dev); if (err) goto out; @@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) 
WREG32(mmVM_CONTEXT1_CNTL, VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK); + ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) + gmc_v6_0_set_fault_enable_default(adev, false); + else + gmc_v6_0_set_fault_enable_default(adev, true); gmc_v6_0_gart_flush_gpu_tlb(adev, 0); dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) + return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); + else + return 0; } static int gmc_v6_0_sw_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 273b16fb9459..8d05e0c4e3d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -375,9 +375,16 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) /* size in MB on si */ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; - adev->mc.visible_vram_size = adev->mc.aper_size; + +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) { + adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; + adev->mc.aper_size = adev->mc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ + adev->mc.visible_vram_size = adev->mc.aper_size; if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 0daac3a5be79..7669b3259f35 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case 
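gmc_v7_0_mc_init() above gains an x86-64 APU special case, repeated verbatim for gmc_v8_0 just below: the left shift by 22 treats MC_VM_FB_OFFSET as counting 4 MiB units to recover the carve-out's physical base, and the aperture is widened to the full real_vram_size since an APU's stolen VRAM is directly CPU-addressable. The visible_vram_size clamp was reordered to run after this block in both files, so it bounds the APU path too:

	/* Shared shape of the change in both mc_init() versions. */
	#ifdef CONFIG_X86_64
		if (adev->flags & AMD_IS_APU) {
			/* MC_VM_FB_OFFSET is in 4 MiB units, hence << 22 */
			adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
			adev->mc.aper_size = adev->mc.real_vram_size;
		}
	#endif
		/* In case the PCI BAR is larger than the actual amount of vram */
		adev->mc.visible_vram_size = adev->mc.aper_size;
		if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
			adev->mc.visible_vram_size = adev->mc.real_vram_size;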
CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = "polaris12"; + break; case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: @@ -462,9 +467,16 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) /* size in MB on si */ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; - adev->mc.visible_vram_size = adev->mc.aper_size; + +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) { + adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; + adev->mc.aper_size = adev->mc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ + adev->mc.visible_vram_size = adev->mc.aper_size; if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; @@ -1434,6 +1446,21 @@ static int gmc_v8_0_set_powergating_state(void *handle, return 0; } +static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_MC_MGCG */ + data = RREG32(mmMC_HUB_MISC_HUB_CG); + if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_MC_MGCG; + + /* AMD_CG_SUPPORT_MC_LS */ + if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_MC_LS; +} + static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .name = "gmc_v8_0", .early_init = gmc_v8_0_early_init, @@ -1452,6 +1479,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .post_soft_reset = gmc_v8_0_post_soft_reset, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, + .get_clockgating_state = gmc_v8_0_get_clockgating_state, }; static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 5a1bc358bcb1..8785ca570729 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1230,6 +1230,7 @@ static void kv_update_current_ps(struct amdgpu_device *adev, pi->current_rps = *rps; pi->current_ps = *new_ps; pi->current_rps.ps_priv = &pi->current_ps; + adev->pm.dpm.current_ps = &pi->current_rps; } static void kv_update_requested_ps(struct amdgpu_device *adev, @@ -1241,6 +1242,7 @@ static void kv_update_requested_ps(struct amdgpu_device *adev, pi->requested_rps = *rps; pi->requested_ps = *new_ps; pi->requested_rps.ps_priv = &pi->requested_ps; + adev->pm.dpm.requested_ps = &pi->requested_rps; } static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable) @@ -1904,19 +1906,19 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev, } static int kv_dpm_force_performance_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) + enum amd_dpm_forced_level level) { int ret; - if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + if (level == AMD_DPM_FORCED_LEVEL_HIGH) { ret = kv_force_dpm_highest(adev); if (ret) return ret; - } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { ret = kv_force_dpm_lowest(adev); if (ret) return ret; - } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { ret = kv_unforce_levels(adev); if (ret) return ret; @@ -3009,7 +3011,6 @@ static int kv_dpm_late_init(void *handle) kv_dpm_powergate_samu(adev, true); kv_dpm_powergate_vce(adev, true); kv_dpm_powergate_uvd(adev, true); - return 0; } @@ -3029,7 +3030,7 
@@ static int kv_dpm_sw_init(void *handle) /* default to balanced state */ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; adev->pm.default_sclk = adev->clock.default_sclk; adev->pm.default_mclk = adev->clock.default_mclk; adev->pm.current_sclk = adev->clock.default_sclk; @@ -3078,6 +3079,9 @@ static int kv_dpm_hw_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!amdgpu_dpm) + return 0; + mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); @@ -3245,15 +3249,52 @@ static int kv_dpm_set_powergating_state(void *handle, return 0; } +static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, + const struct kv_pl *kv_cpl2) +{ + return ((kv_cpl1->sclk == kv_cpl2->sclk) && + (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && + (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && + (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); +} + static int kv_check_state_equal(struct amdgpu_device *adev, struct amdgpu_ps *cps, struct amdgpu_ps *rps, bool *equal) { - if (equal == NULL) + struct kv_ps *kv_cps; + struct kv_ps *kv_rps; + int i; + + if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) return -EINVAL; - *equal = false; + kv_cps = kv_get_ps(cps); + kv_rps = kv_get_ps(rps); + + if (kv_cps == NULL) { + *equal = false; + return 0; + } + + if (kv_cps->num_levels != kv_rps->num_levels) { + *equal = false; + return 0; + } + + for (i = 0; i < kv_cps->num_levels; i++) { + if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), + &(kv_rps->levels[i]))) { + *equal = false; + return 0; + } + } + + /* If all performance levels are the same, try to use the UVD clocks to break the tie. */ + *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); + *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); + return 0; } @@ -3307,12 +3348,3 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; } - -const struct amdgpu_ip_block_version kv_dpm_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 7, - .minor = 0, - .rev = 0, - .funcs = &kv_dpm_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c new file mode 100644 index 000000000000..d2622b6f49fa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -0,0 +1,592 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Xiangliang.Yu@amd.com + */ + +#include "amdgpu.h" +#include "vi.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "vid.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "gmc_v8_0.h" +#include "gfx_v8_0.h" +#include "sdma_v3_0.h" +#include "tonga_ih.h" +#include "gmc/gmc_8_2_d.h" +#include "gmc/gmc_8_2_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "oss/oss_3_0_sh_mask.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" +#include "smu/smu_7_1_3_d.h" +#include "mxgpu_vi.h" + +/* VI golden setting */ +static const u32 xgpu_fiji_mgcg_cgcg_init[] = { + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100, +}; + +static const u32 xgpu_fiji_golden_settings_a10[] = { + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x1f311fff, 0x12300000, + mmHDMI_CONTROL, 0x31000111, 0x00000011, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + 
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, +}; + +static const u32 xgpu_fiji_golden_common_all[] = { + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009, +}; + +static const u32 xgpu_tonga_mgcg_cgcg_init[] = { + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, + mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 
0x00030002, + mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000, + mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, + mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007, + mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005, + mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, + mmPCIE_INDEX, 0xffffffff, 0x0140001c, + mmPCIE_DATA, 0x000f0000, 0x00000000, + mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, + mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, + mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, + mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100, +}; + +static const u32 xgpu_tonga_golden_settings_a11[] = { + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, + mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x1f311fff, 0x12300000, + mmGB_GPU_ID, 0x0000000f, 0x00000000, + mmHDMI_CONTROL, 0x31000111, 0x00000011, + mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, + mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028, + mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, + mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, + mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, + mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, + mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, + mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, + 
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, +}; + +static const u32 xgpu_tonga_golden_common_all[] = { + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, + mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, +}; + +void xgpu_vi_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_FIJI: + amdgpu_program_register_sequence(adev, + xgpu_fiji_mgcg_cgcg_init, + (const u32)ARRAY_SIZE( + xgpu_fiji_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + xgpu_fiji_golden_settings_a10, + (const u32)ARRAY_SIZE( + xgpu_fiji_golden_settings_a10)); + amdgpu_program_register_sequence(adev, + xgpu_fiji_golden_common_all, + (const u32)ARRAY_SIZE( + xgpu_fiji_golden_common_all)); + break; + case CHIP_TONGA: + amdgpu_program_register_sequence(adev, + xgpu_tonga_mgcg_cgcg_init, + (const u32)ARRAY_SIZE( + xgpu_tonga_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + xgpu_tonga_golden_settings_a11, + (const u32)ARRAY_SIZE( + xgpu_tonga_golden_settings_a11)); + amdgpu_program_register_sequence(adev, + xgpu_tonga_golden_common_all, + (const u32)ARRAY_SIZE( + xgpu_tonga_golden_common_all)); + break; + default: + pr_err("unsupported chip type\n"); + BUG(); + break; + } +} + +/* + * Mailbox communication between GPU hypervisor and VFs + */ +static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev) +{ + u32 reg; + + reg = RREG32(mmMAILBOX_CONTROL); + reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1); + WREG32(mmMAILBOX_CONTROL, reg); +} + +static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val) +{ + u32 reg; + + reg = RREG32(mmMAILBOX_CONTROL); + reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, + TRN_MSG_VALID, val ?
1 : 0); + WREG32(mmMAILBOX_CONTROL, reg); +} + +static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev, + enum idh_event event) +{ + u32 reg; + + reg = RREG32(mmMAILBOX_MSGBUF_TRN_DW0); + reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, event); + WREG32(mmMAILBOX_MSGBUF_TRN_DW0, reg); + + xgpu_vi_mailbox_set_valid(adev, true); +} + +static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev, + enum idh_event event) +{ + u32 reg; + + reg = RREG32(mmMAILBOX_MSGBUF_RCV_DW0); + if (reg != event) + return -ENOENT; + + /* send ack to PF */ + xgpu_vi_mailbox_send_ack(adev); + + return 0; +} + +static int xgpu_vi_poll_ack(struct amdgpu_device *adev) +{ + int r = 0, timeout = VI_MAILBOX_TIMEDOUT; + u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK); + u32 reg; + + reg = RREG32(mmMAILBOX_CONTROL); + while (!(reg & mask)) { + if (timeout <= 0) { + pr_err("Didn't get ack from PF.\n"); + r = -ETIME; + break; + } + msleep(1); + timeout -= 1; + + reg = RREG32(mmMAILBOX_CONTROL); + } + + return r; +} + +static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) +{ + int r = 0, timeout = VI_MAILBOX_TIMEDOUT; + + r = xgpu_vi_mailbox_rcv_msg(adev, event); + while (r) { + if (timeout <= 0) { + pr_err("Didn't get msg %d from PF.\n", event); + r = -ETIME; + break; + } + msleep(1); + timeout -= 1; + + r = xgpu_vi_mailbox_rcv_msg(adev, event); + } + + return r; +} + +static int xgpu_vi_send_access_requests(struct amdgpu_device *adev, + enum idh_request request) +{ + int r; + + xgpu_vi_mailbox_trans_msg(adev, request); + + /* start to poll ack */ + r = xgpu_vi_poll_ack(adev); + if (r) + return r; + + xgpu_vi_mailbox_set_valid(adev, false); + + /* start to check msg if request is idh_req_gpu_init_access */ + if (request == IDH_REQ_GPU_INIT_ACCESS) { + r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + if (r) + return r; + } + + return 0; +} + +static int xgpu_vi_request_reset(struct amdgpu_device *adev) +{ + return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); +} + +static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + + req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; + return xgpu_vi_send_access_requests(adev, req); +} + +static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + int r = 0; + + req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; + r = xgpu_vi_send_access_requests(adev, req); + + return r; +} + +/* mailbox interrupt support */ +static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("got ack interrupt, nothing to do.\n"); + return 0; +} + +static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32(mmMAILBOX_INT_CNTL); + + tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ?
1 : 0); + WREG32(mmMAILBOX_INT_CNTL, tmp); + + return 0; +} + +static void xgpu_vi_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, + struct amdgpu_virt, flr_work.work); + struct amdgpu_device *adev = container_of(virt, + struct amdgpu_device, virt); + int r = 0; + + r = xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL); + if (r) + DRM_ERROR("failed to get flr cmpl msg from hypervisor.\n"); + + /* TODO: need to restore gfx states */ +} + +static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32(mmMAILBOX_INT_CNTL); + + tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32(mmMAILBOX_INT_CNTL, tmp); + + return 0; +} + +static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int r; + + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); + /* do nothing for other messages */ + if (r) + return 0; + + /* TODO: need to save gfx states */ + schedule_delayed_work(&adev->virt.flr_work, + msecs_to_jiffies(VI_MAILBOX_RESET_TIME)); + + return 0; +} + +static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = { + .set = xgpu_vi_set_mailbox_ack_irq, + .process = xgpu_vi_mailbox_ack_irq, +}; + +static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = { + .set = xgpu_vi_set_mailbox_rcv_irq, + .process = xgpu_vi_mailbox_rcv_irq, +}; + +void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->virt.ack_irq.num_types = 1; + adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs; + adev->virt.rcv_irq.num_types = 1; + adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs; +} + +int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + return 0; +} + +int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + INIT_DELAYED_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work); + + return 0; +} + +void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev) +{ + cancel_delayed_work_sync(&adev->virt.flr_work); + amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); +} + +const struct amdgpu_virt_ops xgpu_vi_virt_ops = { + .req_full_gpu = xgpu_vi_request_full_gpu_access, + .rel_full_gpu = xgpu_vi_release_full_gpu_access, + .reset_gpu = xgpu_vi_request_reset, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h new file mode 100644 index 000000000000..fd6216efd2b0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h @@ -0,0 +1,55 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __MXGPU_VI_H__ +#define __MXGPU_VI_H__ + +#define VI_MAILBOX_TIMEDOUT 150 +#define VI_MAILBOX_RESET_TIME 12 + +/* VI mailbox messages request */ +enum idh_request { + IDH_REQ_GPU_INIT_ACCESS = 1, + IDH_REL_GPU_INIT_ACCESS, + IDH_REQ_GPU_FINI_ACCESS, + IDH_REL_GPU_FINI_ACCESS, + IDH_REQ_GPU_RESET_ACCESS +}; + +/* VI mailbox messages data */ +enum idh_event { + IDH_CLR_MSG_BUF = 0, + IDH_READY_TO_ACCESS_GPU, + IDH_FLR_NOTIFICATION, + IDH_FLR_NOTIFICATION_CMPL, + IDH_EVENT_MAX +}; + +extern const struct amdgpu_virt_ops xgpu_vi_virt_ops; + +void xgpu_vi_init_golden_registers(struct amdgpu_device *adev); +void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev); +int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev); +int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev); +void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index fbe74a33899c..896be64b7013 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -701,7 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err1; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1170a64a3184..31375bdde6f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = @@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: + case CHIP_POLARIS12: amdgpu_program_register_sequence(adev, golden_settings_polaris11_a11, (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); @@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS12: + chip_name = 
"polaris12"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; @@ -904,7 +910,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err1; @@ -1527,6 +1533,22 @@ static int sdma_v3_0_set_powergating_state(void *handle, return 0; } +static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_SDMA_MGCG */ + data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]); + if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_SDMA_MGCG; + + /* AMD_CG_SUPPORT_SDMA_LS */ + data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]); + if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK) + *flags |= AMD_CG_SUPPORT_SDMA_LS; +} + static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .name = "sdma_v3_0", .early_init = sdma_v3_0_early_init, @@ -1545,6 +1567,7 @@ static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .soft_reset = sdma_v3_0_soft_reset, .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, + .get_clockgating_state = sdma_v3_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index c46b0159007d..da46992f7b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -32,7 +32,7 @@ #include "amdgpu_vce.h" #include "atom.h" #include "amdgpu_powerplay.h" -#include "si/sid.h" +#include "sid.h" #include "si_ih.h" #include "gfx_v6_0.h" #include "gmc_v6_0.h" @@ -40,337 +40,343 @@ #include "dce_v6_0.h" #include "si.h" #include "dce_virtual.h" +#include "gca/gfx_6_0_d.h" +#include "oss/oss_1_0_d.h" +#include "gmc/gmc_6_0_d.h" +#include "dce/dce_6_0_d.h" +#include "uvd/uvd_4_0_d.h" static const u32 tahiti_golden_registers[] = { - 0x17bc, 0x00000030, 0x00000011, - 0x2684, 0x00010000, 0x00018208, - 0x260c, 0xffffffff, 0x00000000, - 0x260d, 0xf00fffff, 0x00000400, - 0x260e, 0x0002021c, 0x00020200, - 0x031e, 0x00000080, 0x00000000, + mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011, + mmCB_HW_CONTROL, 0x00010000, 0x00018208, + mmDB_DEBUG, 0xffffffff, 0x00000000, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0x0002021c, 0x00020200, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 0x340c, 0x000000c0, 0x00800040, 0x360c, 0x000000c0, 0x00800040, - 0x16ec, 0x000000f0, 0x00000070, - 0x16f0, 0x00200000, 0x50100000, - 0x1c0c, 0x31000311, 0x00000011, - 0x09df, 0x00000003, 0x000007ff, - 0x0903, 0x000007ff, 0x00000000, - 0x2285, 0xf000001f, 0x00000007, - 0x22c9, 0xffffffff, 0x00ffffff, - 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, - 0xa0d4, 0x3f3f3fff, 0x2a00126a, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x00200000, 0x50100000, + mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011, + mmMC_ARB_WTM_CNTL_RD, 0x00000003, 0x000007ff, + mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000, + mmPA_CL_ENHANCE, 0xf000001f, 0x00000007, + mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000, + mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a, 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, - 0x2440, 0x07ffffff, 0x03000000, - 0x23a2, 0x01ff1f3f, 0x00000000, - 
0x23a1, 0x01ff1f3f, 0x00000000, - 0x2418, 0x0000007f, 0x00000020, - 0x2542, 0x00010000, 0x00010000, - 0x2b05, 0x00000200, 0x000002fb, - 0x2b04, 0xffffffff, 0x0000543b, - 0x2b03, 0xffffffff, 0xa9210876, - 0x2234, 0xffffffff, 0x000fff40, - 0x2235, 0x0000001f, 0x00000010, - 0x0504, 0x20000000, 0x20fffed8, - 0x0570, 0x000c0fc0, 0x000c0400, - 0x052c, 0x0fffffff, 0xffffffff, - 0x052d, 0x0fffffff, 0x0fffffff, - 0x052e, 0x0fffffff, 0x0fffffff, - 0x052f, 0x0fffffff, 0x0fffffff + mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000, + mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000, + mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000, + mmSX_DEBUG_1, 0x0000007f, 0x00000020, + mmTA_CNTL_AUX, 0x00010000, 0x00010000, + mmTCP_ADDR_CONFIG, 0x00000200, 0x000002fb, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876, + mmVGT_FIFO_DEPTHS, 0xffffffff, 0x000fff40, + mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010, + mmVM_CONTEXT0_CNTL, 0x20000000, 0x20fffed8, + mmVM_L2_CG, 0x000c0fc0, 0x000c0400, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; static const u32 tahiti_golden_registers2[] = { - 0x0319, 0x00000001, 0x00000001 + mmMCIF_MEM_CONTROL, 0x00000001, 0x00000001, }; static const u32 tahiti_golden_rlc_registers[] = { - 0x263e, 0xffffffff, 0x12011003, - 0x3109, 0xffffffff, 0x00601005, + mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003, + mmRLC_LB_PARAMS, 0xffffffff, 0x00601005, 0x311f, 0xffffffff, 0x10104040, 0x3122, 0xffffffff, 0x0100000a, - 0x30c5, 0xffffffff, 0x00000800, - 0x30c3, 0xffffffff, 0x800000f4, - 0x3d2a, 0x00000008, 0x00000000 + mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800, + mmRLC_LB_CNTL, 0xffffffff, 0x800000f4, + mmUVD_CGC_GATE, 0x00000008, 0x00000000, }; static const u32 pitcairn_golden_registers[] = { - 0x17bc, 0x00000030, 0x00000011, - 0x2684, 0x00010000, 0x00018208, - 0x260c, 0xffffffff, 0x00000000, - 0x260d, 0xf00fffff, 0x00000400, - 0x260e, 0x0002021c, 0x00020200, - 0x031e, 0x00000080, 0x00000000, + mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011, + mmCB_HW_CONTROL, 0x00010000, 0x00018208, + mmDB_DEBUG, 0xffffffff, 0x00000000, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0x0002021c, 0x00020200, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 0x340c, 0x000300c0, 0x00800040, 0x360c, 0x000300c0, 0x00800040, - 0x16ec, 0x000000f0, 0x00000070, - 0x16f0, 0x00200000, 0x50100000, - 0x1c0c, 0x31000311, 0x00000011, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0903, 0x000007ff, 0x00000000, - 0x2285, 0xf000001f, 0x00000007, - 0x22c9, 0xffffffff, 0x00ffffff, - 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, - 0xa0d4, 0x3f3f3fff, 0x2a00126a, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x00200000, 0x50100000, + mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011, + mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2, + mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000, + mmPA_CL_ENHANCE, 0xf000001f, 0x00000007, + mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000, + mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a, 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, - 0x2440, 0x07ffffff, 0x03000000, - 0x2418, 0x0000007f, 0x00000020, - 0x2542, 0x00010000, 0x00010000, - 0x2b05, 0x000003ff, 0x000000f7, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b03, 0xffffffff, 0x32761054, - 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 
0x000c0400, - 0x052c, 0x0fffffff, 0xffffffff, - 0x052d, 0x0fffffff, 0x0fffffff, - 0x052e, 0x0fffffff, 0x0fffffff, - 0x052f, 0x0fffffff, 0x0fffffff + mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000, + mmSX_DEBUG_1, 0x0000007f, 0x00000020, + mmTA_CNTL_AUX, 0x00010000, 0x00010000, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054, + mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010, + mmVM_L2_CG, 0x000c0fc0, 0x000c0400, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; static const u32 pitcairn_golden_rlc_registers[] = { - 0x263e, 0xffffffff, 0x12011003, - 0x3109, 0xffffffff, 0x00601004, + mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003, + mmRLC_LB_PARAMS, 0xffffffff, 0x00601004, 0x311f, 0xffffffff, 0x10102020, 0x3122, 0xffffffff, 0x01000020, - 0x30c5, 0xffffffff, 0x00000800, - 0x30c3, 0xffffffff, 0x800000a4 + mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800, + mmRLC_LB_CNTL, 0xffffffff, 0x800000a4, }; static const u32 verde_pg_init[] = { - 0x0d4f, 0xffffffff, 0x40000, - 0x0d4e, 0xffffffff, 0x200010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x7007, - 0x0d4e, 0xffffffff, 0x300010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x400000, - 0x0d4e, 0xffffffff, 0x100010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x120200, - 0x0d4e, 0xffffffff, 0x500010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x1e1e16, - 0x0d4e, 0xffffffff, 0x600010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x171f1e, - 0x0d4e, 0xffffffff, 0x700010ff, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4f, 0xffffffff, 0x0, - 0x0d4e, 0xffffffff, 0x9ff, - 0x0d40, 0xffffffff, 0x0, - 0x0d41, 0xffffffff, 0x10000800, - 0x0d41, 0xffffffff, 0xf, - 0x0d41, 0xffffffff, 0xf, - 0x0d40, 0xffffffff, 0x4, - 0x0d41, 0xffffffff, 0x1000051e, - 0x0d41, 0xffffffff, 0xffff, - 0x0d41, 0xffffffff, 0xffff, - 0x0d40, 0xffffffff, 0x8, - 0x0d41, 0xffffffff, 0x80500, - 0x0d40, 0xffffffff, 0x12, - 0x0d41, 0xffffffff, 0x9050c, - 0x0d40, 0xffffffff, 0x1d, - 0x0d41, 0xffffffff, 0xb052c, - 0x0d40, 0xffffffff, 0x2a, - 0x0d41, 0xffffffff, 0x1053e, - 0x0d40, 0xffffffff, 0x2d, - 0x0d41, 0xffffffff, 0x10546, - 0x0d40, 0xffffffff, 0x30, - 0x0d41, 0xffffffff, 0xa054e, - 0x0d40, 0xffffffff, 0x3c, - 0x0d41, 0xffffffff, 0x1055f, - 0x0d40, 0xffffffff, 0x3f, - 0x0d41, 0xffffffff, 0x10567, - 0x0d40, 0xffffffff, 0x42, - 0x0d41, 0xffffffff, 0x1056f, - 0x0d40, 0xffffffff, 0x45, - 0x0d41, 0xffffffff, 0x10572, - 0x0d40, 0xffffffff, 0x48, - 0x0d41, 0xffffffff, 0x20575, - 0x0d40, 0xffffffff, 0x4c, - 0x0d41, 0xffffffff, 0x190801, - 0x0d40, 0xffffffff, 0x67, - 0x0d41, 0xffffffff, 0x1082a, - 0x0d40, 0xffffffff, 0x6a, - 0x0d41, 0xffffffff, 0x1b082d, - 0x0d40, 0xffffffff, 0x87, - 0x0d41, 
0xffffffff, 0x310851, - 0x0d40, 0xffffffff, 0xba, - 0x0d41, 0xffffffff, 0x891, - 0x0d40, 0xffffffff, 0xbc, - 0x0d41, 0xffffffff, 0x893, - 0x0d40, 0xffffffff, 0xbe, - 0x0d41, 0xffffffff, 0x20895, - 0x0d40, 0xffffffff, 0xc2, - 0x0d41, 0xffffffff, 0x20899, - 0x0d40, 0xffffffff, 0xc6, - 0x0d41, 0xffffffff, 0x2089d, - 0x0d40, 0xffffffff, 0xca, - 0x0d41, 0xffffffff, 0x8a1, - 0x0d40, 0xffffffff, 0xcc, - 0x0d41, 0xffffffff, 0x8a3, - 0x0d40, 0xffffffff, 0xce, - 0x0d41, 0xffffffff, 0x308a5, - 0x0d40, 0xffffffff, 0xd3, - 0x0d41, 0xffffffff, 0x6d08cd, - 0x0d40, 0xffffffff, 0x142, - 0x0d41, 0xffffffff, 0x2000095a, - 0x0d41, 0xffffffff, 0x1, - 0x0d40, 0xffffffff, 0x144, - 0x0d41, 0xffffffff, 0x301f095b, - 0x0d40, 0xffffffff, 0x165, - 0x0d41, 0xffffffff, 0xc094d, - 0x0d40, 0xffffffff, 0x173, - 0x0d41, 0xffffffff, 0xf096d, - 0x0d40, 0xffffffff, 0x184, - 0x0d41, 0xffffffff, 0x15097f, - 0x0d40, 0xffffffff, 0x19b, - 0x0d41, 0xffffffff, 0xc0998, - 0x0d40, 0xffffffff, 0x1a9, - 0x0d41, 0xffffffff, 0x409a7, - 0x0d40, 0xffffffff, 0x1af, - 0x0d41, 0xffffffff, 0xcdc, - 0x0d40, 0xffffffff, 0x1b1, - 0x0d41, 0xffffffff, 0x800, - 0x0d42, 0xffffffff, 0x6c9b2000, - 0x0d44, 0xfc00, 0x2000, - 0x0d51, 0xffffffff, 0xfc0, - 0x0a35, 0x00000100, 0x100 + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x40000, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x200010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x7007, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x300010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x400000, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x100010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x120200, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x500010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x1e1e16, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x600010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x171f1e, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x700010ff, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0, + mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x9ff, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x0, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10000800, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1000051e, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x8, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x80500, + mmGMCON_RENG_RAM_INDEX, 
0xffffffff, 0x12, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x9050c, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1d, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xb052c, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2a, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1053e, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2d, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10546, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x30, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xa054e, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3c, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1055f, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3f, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10567, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x42, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1056f, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x45, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10572, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x48, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20575, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4c, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x190801, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x67, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1082a, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x6a, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1b082d, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x87, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x310851, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xba, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x891, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbc, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x893, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbe, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20895, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc2, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20899, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc6, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2089d, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xca, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a1, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xcc, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a3, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xce, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x308a5, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xd3, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x6d08cd, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x142, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2000095a, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x144, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x301f095b, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x165, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc094d, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x173, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf096d, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x184, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x15097f, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x19b, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc0998, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1a9, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x409a7, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1af, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xcdc, + mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1b1, + mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x800, + mmGMCON_RENG_EXECUTE, 0xffffffff, 0x6c9b2000, + mmGMCON_MISC2, 0xfc00, 0x2000, + mmGMCON_MISC3, 0xffffffff, 0xfc0, + mmMC_PMG_AUTO_CFG, 0x00000100, 0x100, }; static const u32 verde_golden_rlc_registers[] = { - 0x263e, 0xffffffff, 0x02010002, - 0x3109, 0xffffffff, 0x033f1005, + mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002, + mmRLC_LB_PARAMS, 0xffffffff, 0x033f1005, 0x311f, 0xffffffff, 0x10808020, 0x3122, 0xffffffff, 0x00800008, - 0x30c5, 0xffffffff, 0x00001000, - 0x30c3, 0xffffffff, 0x80010014 + mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00001000, + mmRLC_LB_CNTL, 0xffffffff, 0x80010014, }; static const u32 verde_golden_registers[] = { - 0x17bc, 
0x00000030, 0x00000011, - 0x2684, 0x00010000, 0x00018208, - 0x260c, 0xffffffff, 0x00000000, - 0x260d, 0xf00fffff, 0x00000400, - 0x260e, 0x0002021c, 0x00020200, - 0x031e, 0x00000080, 0x00000000, + mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011, + mmCB_HW_CONTROL, 0x00010000, 0x00018208, + mmDB_DEBUG, 0xffffffff, 0x00000000, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0x0002021c, 0x00020200, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 0x340c, 0x000300c0, 0x00800040, 0x360c, 0x000300c0, 0x00800040, - 0x16ec, 0x000000f0, 0x00000070, - 0x16f0, 0x00200000, 0x50100000, - 0x1c0c, 0x31000311, 0x00000011, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0903, 0x000007ff, 0x00000000, - 0x2285, 0xf000001f, 0x00000007, - 0x22c9, 0xffffffff, 0x00ffffff, - 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, - 0xa0d4, 0x3f3f3fff, 0x0000124a, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x00200000, 0x50100000, + mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011, + mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2, + mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000, + mmPA_CL_ENHANCE, 0xf000001f, 0x00000007, + mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000, + mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x0000124a, 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, - 0x2440, 0x07ffffff, 0x03000000, - 0x23a2, 0x01ff1f3f, 0x00000000, - 0x23a1, 0x01ff1f3f, 0x00000000, - 0x2418, 0x0000007f, 0x00000020, - 0x2542, 0x00010000, 0x00010000, - 0x2b05, 0x000003ff, 0x00000003, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b03, 0xffffffff, 0x00001032, - 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400, - 0x052c, 0x0fffffff, 0xffffffff, - 0x052d, 0x0fffffff, 0x0fffffff, - 0x052e, 0x0fffffff, 0x0fffffff, - 0x052f, 0x0fffffff, 0x0fffffff + mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000, + mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000, + mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000, + mmSX_DEBUG_1, 0x0000007f, 0x00000020, + mmTA_CNTL_AUX, 0x00010000, 0x00010000, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x00000003, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001032, + mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010, + mmVM_L2_CG, 0x000c0fc0, 0x000c0400, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; static const u32 oland_golden_registers[] = { - 0x17bc, 0x00000030, 0x00000011, - 0x2684, 0x00010000, 0x00018208, - 0x260c, 0xffffffff, 0x00000000, - 0x260d, 0xf00fffff, 0x00000400, - 0x260e, 0x0002021c, 0x00020200, - 0x031e, 0x00000080, 0x00000000, + mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011, + mmCB_HW_CONTROL, 0x00010000, 0x00018208, + mmDB_DEBUG, 0xffffffff, 0x00000000, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0x0002021c, 0x00020200, + mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 0x340c, 0x000300c0, 0x00800040, 0x360c, 0x000300c0, 0x00800040, - 0x16ec, 0x000000f0, 0x00000070, - 0x16f0, 0x00200000, 0x50100000, - 0x1c0c, 0x31000311, 0x00000011, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0903, 0x000007ff, 0x00000000, - 0x2285, 0xf000001f, 0x00000007, - 0x22c9, 0xffffffff, 0x00ffffff, - 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, - 0xa0d4, 0x3f3f3fff, 0x00000082, + mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, + mmFBC_MISC, 0x00200000, 0x50100000, + mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011, + 
mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2, + mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000, + mmPA_CL_ENHANCE, 0xf000001f, 0x00000007, + mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000, + mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000082, 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, - 0x2440, 0x07ffffff, 0x03000000, - 0x2418, 0x0000007f, 0x00000020, - 0x2542, 0x00010000, 0x00010000, - 0x2b05, 0x000003ff, 0x000000f3, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b03, 0xffffffff, 0x00003210, - 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400, - 0x052c, 0x0fffffff, 0xffffffff, - 0x052d, 0x0fffffff, 0x0fffffff, - 0x052e, 0x0fffffff, 0x0fffffff, - 0x052f, 0x0fffffff, 0x0fffffff + mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000, + mmSX_DEBUG_1, 0x0000007f, 0x00000020, + mmTA_CNTL_AUX, 0x00010000, 0x00010000, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210, + mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010, + mmVM_L2_CG, 0x000c0fc0, 0x000c0400, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, + }; static const u32 oland_golden_rlc_registers[] = { - 0x263e, 0xffffffff, 0x02010002, - 0x3109, 0xffffffff, 0x00601005, + mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002, + mmRLC_LB_PARAMS, 0xffffffff, 0x00601005, 0x311f, 0xffffffff, 0x10104040, 0x3122, 0xffffffff, 0x0100000a, - 0x30c5, 0xffffffff, 0x00000800, - 0x30c3, 0xffffffff, 0x800000f4 + mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800, + mmRLC_LB_CNTL, 0xffffffff, 0x800000f4, }; static const u32 hainan_golden_registers[] = { 0x17bc, 0x00000030, 0x00000011, - 0x2684, 0x00010000, 0x00018208, - 0x260c, 0xffffffff, 0x00000000, - 0x260d, 0xf00fffff, 0x00000400, - 0x260e, 0x0002021c, 0x00020200, + mmCB_HW_CONTROL, 0x00010000, 0x00018208, + mmDB_DEBUG, 0xffffffff, 0x00000000, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmDB_DEBUG3, 0x0002021c, 0x00020200, 0x031e, 0x00000080, 0x00000000, 0x3430, 0xff000fff, 0x00000100, 0x340c, 0x000300c0, 0x00800040, @@ -379,63 +385,63 @@ static const u32 hainan_golden_registers[] = 0x16ec, 0x000000f0, 0x00000070, 0x16f0, 0x00200000, 0x50100000, 0x1c0c, 0x31000311, 0x00000011, - 0x0ab9, 0x00073ffe, 0x000022a2, - 0x0903, 0x000007ff, 0x00000000, - 0x2285, 0xf000001f, 0x00000007, - 0x22c9, 0xffffffff, 0x00ffffff, - 0x22c4, 0x0000ff0f, 0x00000000, - 0xa293, 0x07ffffff, 0x4e000000, - 0xa0d4, 0x3f3f3fff, 0x00000000, + mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2, + mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000, + mmPA_CL_ENHANCE, 0xf000001f, 0x00000007, + mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000, + mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000000, 0x000c, 0xffffffff, 0x0040, 0x000d, 0x00000040, 0x00004040, - 0x2440, 0x03e00000, 0x03600000, - 0x2418, 0x0000007f, 0x00000020, - 0x2542, 0x00010000, 0x00010000, - 0x2b05, 0x000003ff, 0x000000f1, - 0x2b04, 0xffffffff, 0x00000000, - 0x2b03, 0xffffffff, 0x00003210, - 0x2235, 0x0000001f, 0x00000010, - 0x0570, 0x000c0fc0, 0x000c0400, - 0x052c, 0x0fffffff, 0xffffffff, - 0x052d, 0x0fffffff, 0x0fffffff, - 0x052e, 0x0fffffff, 0x0fffffff, - 0x052f, 0x0fffffff, 0x0fffffff + mmSPI_CONFIG_CNTL, 0x03e00000, 0x03600000, + 
mmSX_DEBUG_1, 0x0000007f, 0x00000020, + mmTA_CNTL_AUX, 0x00010000, 0x00010000, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210, + mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010, + mmVM_L2_CG, 0x000c0fc0, 0x000c0400, + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, }; static const u32 hainan_golden_registers2[] = { - 0x263e, 0xffffffff, 0x2011003 + mmGB_ADDR_CONFIG, 0xffffffff, 0x2011003, }; static const u32 tahiti_mgcg_cgcg_init[] = { - 0x3100, 0xffffffff, 0xfffffffc, - 0x200b, 0xffffffff, 0xe0000000, - 0x2698, 0xffffffff, 0x00000100, - 0x24a9, 0xffffffff, 0x00000100, - 0x3059, 0xffffffff, 0x00000100, - 0x25dd, 0xffffffff, 0x00000100, - 0x2261, 0xffffffff, 0x06000100, - 0x2286, 0xffffffff, 0x00000100, - 0x24a8, 0xffffffff, 0x00000100, - 0x30e0, 0xffffffff, 0x00000100, - 0x22ca, 0xffffffff, 0x00000100, - 0x2451, 0xffffffff, 0x00000100, - 0x2362, 0xffffffff, 0x00000100, - 0x2363, 0xffffffff, 0x00000100, - 0x240c, 0xffffffff, 0x00000100, - 0x240d, 0xffffffff, 0x00000100, - 0x240e, 0xffffffff, 0x00000100, - 0x240f, 0xffffffff, 0x00000100, - 0x2b60, 0xffffffff, 0x00000100, - 0x2b15, 0xffffffff, 0x00000100, - 0x225f, 0xffffffff, 0x06000100, - 0x261a, 0xffffffff, 0x00000100, - 0x2544, 0xffffffff, 0x00000100, - 0x2bc1, 0xffffffff, 0x00000100, - 0x2b81, 0xffffffff, 0x00000100, - 0x2527, 0xffffffff, 0x00000100, - 0x200b, 0xffffffff, 0xe0000000, + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 0x2458, 0xffffffff, 0x00010000, 0x2459, 0xffffffff, 0x00030002, 0x245a, 0xffffffff, 0x00040007, @@ -516,55 +522,55 @@ static const u32 tahiti_mgcg_cgcg_init[] = 0x24a5, 0xffffffff, 0x00000015, 0x24a6, 0xffffffff, 0x00140013, 0x24a7, 0xffffffff, 0x00170016, - 0x2454, 0xffffffff, 0x96940200, - 0x21c2, 0xffffffff, 0x00900100, - 0x311e, 0xffffffff, 0x00000080, - 0x3101, 0xffffffff, 0x0020003f, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, 0x000c, 0xffffffff, 0x0000001c, 0x000d, 0x000f0000, 0x000f0000, 0x0583, 0xffffffff, 0x00000100, - 
0x0409, 0xffffffff, 0x00000100, - 0x040b, 0x00000101, 0x00000000, - 0x082a, 0xffffffff, 0x00000104, - 0x0993, 0x000c0000, 0x000c0000, - 0x0992, 0x000c0000, 0x000c0000, - 0x1579, 0xff000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000, + mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0x0bd4, 0x00000001, 0x00000001, - 0x0c33, 0xc0000fff, 0x00000104, - 0x3079, 0x00000001, 0x00000001, + mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, - 0x3630, 0xfffffff0, 0x00000100 + 0x3630, 0xfffffff0, 0x00000100, }; static const u32 pitcairn_mgcg_cgcg_init[] = { - 0x3100, 0xffffffff, 0xfffffffc, - 0x200b, 0xffffffff, 0xe0000000, - 0x2698, 0xffffffff, 0x00000100, - 0x24a9, 0xffffffff, 0x00000100, - 0x3059, 0xffffffff, 0x00000100, - 0x25dd, 0xffffffff, 0x00000100, - 0x2261, 0xffffffff, 0x06000100, - 0x2286, 0xffffffff, 0x00000100, - 0x24a8, 0xffffffff, 0x00000100, - 0x30e0, 0xffffffff, 0x00000100, - 0x22ca, 0xffffffff, 0x00000100, - 0x2451, 0xffffffff, 0x00000100, - 0x2362, 0xffffffff, 0x00000100, - 0x2363, 0xffffffff, 0x00000100, - 0x240c, 0xffffffff, 0x00000100, - 0x240d, 0xffffffff, 0x00000100, - 0x240e, 0xffffffff, 0x00000100, - 0x240f, 0xffffffff, 0x00000100, - 0x2b60, 0xffffffff, 0x00000100, - 0x2b15, 0xffffffff, 0x00000100, - 0x225f, 0xffffffff, 0x06000100, - 0x261a, 0xffffffff, 0x00000100, - 0x2544, 0xffffffff, 0x00000100, - 0x2bc1, 0xffffffff, 0x00000100, - 0x2b81, 0xffffffff, 0x00000100, - 0x2527, 0xffffffff, 0x00000100, - 0x200b, 0xffffffff, 0xe0000000, + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 0x2458, 0xffffffff, 0x00010000, 0x2459, 0xffffffff, 0x00030002, 0x245a, 0xffffffff, 0x00040007, @@ -615,53 +621,54 @@ static const u32 pitcairn_mgcg_cgcg_init[] = 0x2496, 0xffffffff, 0x00100013, 0x2497, 0xffffffff, 0x00120011, 0x2498, 0xffffffff, 0x00150014, - 0x2454, 0xffffffff, 0x96940200, - 0x21c2, 0xffffffff, 0x00900100, - 0x311e, 0xffffffff, 0x00000080, - 0x3101, 0xffffffff, 0x0020003f, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, + 
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, 0x000c, 0xffffffff, 0x0000001c, 0x000d, 0x000f0000, 0x000f0000, 0x0583, 0xffffffff, 0x00000100, - 0x0409, 0xffffffff, 0x00000100, - 0x040b, 0x00000101, 0x00000000, - 0x082a, 0xffffffff, 0x00000104, - 0x1579, 0xff000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0x0bd4, 0x00000001, 0x00000001, - 0x0c33, 0xc0000fff, 0x00000104, - 0x3079, 0x00000001, 0x00000001, + mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, - 0x3630, 0xfffffff0, 0x00000100 + 0x3630, 0xfffffff0, 0x00000100, }; + static const u32 verde_mgcg_cgcg_init[] = { - 0x3100, 0xffffffff, 0xfffffffc, - 0x200b, 0xffffffff, 0xe0000000, - 0x2698, 0xffffffff, 0x00000100, - 0x24a9, 0xffffffff, 0x00000100, - 0x3059, 0xffffffff, 0x00000100, - 0x25dd, 0xffffffff, 0x00000100, - 0x2261, 0xffffffff, 0x06000100, - 0x2286, 0xffffffff, 0x00000100, - 0x24a8, 0xffffffff, 0x00000100, - 0x30e0, 0xffffffff, 0x00000100, - 0x22ca, 0xffffffff, 0x00000100, - 0x2451, 0xffffffff, 0x00000100, - 0x2362, 0xffffffff, 0x00000100, - 0x2363, 0xffffffff, 0x00000100, - 0x240c, 0xffffffff, 0x00000100, - 0x240d, 0xffffffff, 0x00000100, - 0x240e, 0xffffffff, 0x00000100, - 0x240f, 0xffffffff, 0x00000100, - 0x2b60, 0xffffffff, 0x00000100, - 0x2b15, 0xffffffff, 0x00000100, - 0x225f, 0xffffffff, 0x06000100, - 0x261a, 0xffffffff, 0x00000100, - 0x2544, 0xffffffff, 0x00000100, - 0x2bc1, 0xffffffff, 0x00000100, - 0x2b81, 0xffffffff, 0x00000100, - 0x2527, 0xffffffff, 0x00000100, - 0x200b, 0xffffffff, 0xe0000000, + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 0x2458, 0xffffffff, 0x00010000, 0x2459, 0xffffffff, 0x00030002, 0x245a, 0xffffffff, 0x00040007, @@ -712,55 +719,56 @@ static const u32 verde_mgcg_cgcg_init[] = 0x2496, 0xffffffff, 0x00100013, 0x2497, 0xffffffff, 0x00120011, 0x2498, 0xffffffff, 0x00150014, - 0x2454, 0xffffffff, 0x96940200, - 0x21c2, 0xffffffff, 0x00900100, - 0x311e, 0xffffffff, 0x00000080, - 0x3101, 0xffffffff, 
0x0020003f, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, 0x000c, 0xffffffff, 0x0000001c, 0x000d, 0x000f0000, 0x000f0000, 0x0583, 0xffffffff, 0x00000100, - 0x0409, 0xffffffff, 0x00000100, - 0x040b, 0x00000101, 0x00000000, - 0x082a, 0xffffffff, 0x00000104, - 0x0993, 0x000c0000, 0x000c0000, - 0x0992, 0x000c0000, 0x000c0000, - 0x1579, 0xff000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000, + mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0x0bd4, 0x00000001, 0x00000001, - 0x0c33, 0xc0000fff, 0x00000104, - 0x3079, 0x00000001, 0x00000001, + mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, - 0x3630, 0xfffffff0, 0x00000100 + 0x3630, 0xfffffff0, 0x00000100, }; + static const u32 oland_mgcg_cgcg_init[] = { - 0x3100, 0xffffffff, 0xfffffffc, - 0x200b, 0xffffffff, 0xe0000000, - 0x2698, 0xffffffff, 0x00000100, - 0x24a9, 0xffffffff, 0x00000100, - 0x3059, 0xffffffff, 0x00000100, - 0x25dd, 0xffffffff, 0x00000100, - 0x2261, 0xffffffff, 0x06000100, - 0x2286, 0xffffffff, 0x00000100, - 0x24a8, 0xffffffff, 0x00000100, - 0x30e0, 0xffffffff, 0x00000100, - 0x22ca, 0xffffffff, 0x00000100, - 0x2451, 0xffffffff, 0x00000100, - 0x2362, 0xffffffff, 0x00000100, - 0x2363, 0xffffffff, 0x00000100, - 0x240c, 0xffffffff, 0x00000100, - 0x240d, 0xffffffff, 0x00000100, - 0x240e, 0xffffffff, 0x00000100, - 0x240f, 0xffffffff, 0x00000100, - 0x2b60, 0xffffffff, 0x00000100, - 0x2b15, 0xffffffff, 0x00000100, - 0x225f, 0xffffffff, 0x06000100, - 0x261a, 0xffffffff, 0x00000100, - 0x2544, 0xffffffff, 0x00000100, - 0x2bc1, 0xffffffff, 0x00000100, - 0x2b81, 0xffffffff, 0x00000100, - 0x2527, 0xffffffff, 0x00000100, - 0x200b, 0xffffffff, 0xe0000000, + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 0x2458, 0xffffffff, 0x00010000, 0x2459, 0xffffffff, 0x00030002, 0x245a, 0xffffffff, 0x00040007, @@ -791,55 +799,56 @@ static const u32 oland_mgcg_cgcg_init[] = 
0x2473, 0xffffffff, 0x0000000b, 0x2474, 0xffffffff, 0x000a0009, 0x2475, 0xffffffff, 0x000d000c, - 0x2454, 0xffffffff, 0x96940200, - 0x21c2, 0xffffffff, 0x00900100, - 0x311e, 0xffffffff, 0x00000080, - 0x3101, 0xffffffff, 0x0020003f, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, 0x000c, 0xffffffff, 0x0000001c, 0x000d, 0x000f0000, 0x000f0000, 0x0583, 0xffffffff, 0x00000100, - 0x0409, 0xffffffff, 0x00000100, - 0x040b, 0x00000101, 0x00000000, - 0x082a, 0xffffffff, 0x00000104, - 0x0993, 0x000c0000, 0x000c0000, - 0x0992, 0x000c0000, 0x000c0000, - 0x1579, 0xff000fff, 0x00000100, + mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, + mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000, + mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000, + mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, 0x157a, 0x00000001, 0x00000001, - 0x0bd4, 0x00000001, 0x00000001, - 0x0c33, 0xc0000fff, 0x00000104, - 0x3079, 0x00000001, 0x00000001, + mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, - 0x3630, 0xfffffff0, 0x00000100 + 0x3630, 0xfffffff0, 0x00000100, }; + static const u32 hainan_mgcg_cgcg_init[] = { - 0x3100, 0xffffffff, 0xfffffffc, - 0x200b, 0xffffffff, 0xe0000000, - 0x2698, 0xffffffff, 0x00000100, - 0x24a9, 0xffffffff, 0x00000100, - 0x3059, 0xffffffff, 0x00000100, - 0x25dd, 0xffffffff, 0x00000100, - 0x2261, 0xffffffff, 0x06000100, - 0x2286, 0xffffffff, 0x00000100, - 0x24a8, 0xffffffff, 0x00000100, - 0x30e0, 0xffffffff, 0x00000100, - 0x22ca, 0xffffffff, 0x00000100, - 0x2451, 0xffffffff, 0x00000100, - 0x2362, 0xffffffff, 0x00000100, - 0x2363, 0xffffffff, 0x00000100, - 0x240c, 0xffffffff, 0x00000100, - 0x240d, 0xffffffff, 0x00000100, - 0x240e, 0xffffffff, 0x00000100, - 0x240f, 0xffffffff, 0x00000100, - 0x2b60, 0xffffffff, 0x00000100, - 0x2b15, 0xffffffff, 0x00000100, - 0x225f, 0xffffffff, 0x06000100, - 0x261a, 0xffffffff, 0x00000100, - 0x2544, 0xffffffff, 0x00000100, - 0x2bc1, 0xffffffff, 0x00000100, - 0x2b81, 0xffffffff, 0x00000100, - 0x2527, 0xffffffff, 0x00000100, - 0x200b, 0xffffffff, 0xe0000000, + mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, + mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, + mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, + mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, + mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, + mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, + mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, + mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, + mmTD_CGTT_CTRL, 
0xffffffff, 0x00000100, + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, 0x2458, 0xffffffff, 0x00010000, 0x2459, 0xffffffff, 0x00030002, 0x245a, 0xffffffff, 0x00040007, @@ -870,22 +879,22 @@ static const u32 hainan_mgcg_cgcg_init[] = 0x2473, 0xffffffff, 0x0000000b, 0x2474, 0xffffffff, 0x000a0009, 0x2475, 0xffffffff, 0x000d000c, - 0x2454, 0xffffffff, 0x96940200, - 0x21c2, 0xffffffff, 0x00900100, - 0x311e, 0xffffffff, 0x00000080, - 0x3101, 0xffffffff, 0x0020003f, + mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, + mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, + mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080, + mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, 0x000c, 0xffffffff, 0x0000001c, 0x000d, 0x000f0000, 0x000f0000, 0x0583, 0xffffffff, 0x00000100, 0x0409, 0xffffffff, 0x00000100, - 0x082a, 0xffffffff, 0x00000104, - 0x0993, 0x000c0000, 0x000c0000, - 0x0992, 0x000c0000, 0x000c0000, - 0x0bd4, 0x00000001, 0x00000001, - 0x0c33, 0xc0000fff, 0x00000104, - 0x3079, 0x00000001, 0x00000001, + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104, + mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000, + mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000, + mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001, + mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, + mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, 0x3430, 0xfffffff0, 0x00000100, - 0x3630, 0xfffffff0, 0x00000100 + 0x3630, 0xfffffff0, 0x00000100, }; static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg) @@ -1129,13 +1138,12 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) static void si_detect_hw_virtualization(struct amdgpu_device *adev) { if (is_virtual_machine()) /* passthrough mode */ - adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, - .detect_hw_virtualization = si_detect_hw_virtualization, .read_register = &si_read_register, .reset = &si_asic_reset, .set_vga_state = &si_vga_set_state, @@ -1852,6 +1860,8 @@ static const struct amdgpu_ip_block_version si_common_ip_block = int si_set_ip_blocks(struct amdgpu_device *adev) { + si_detect_hw_virtualization(adev); + switch (adev->asic_type) { case CHIP_VERDE: case CHIP_TAHITI: diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 3dd552ae0b59..3372a071bb85 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -24,7 +24,7 @@ #include <drm/drmP.h> #include "amdgpu.h" #include "amdgpu_trace.h" -#include "si/sid.h" +#include "sid.h" const u32 sdma_offsets[SDMA_MAX_INSTANCE] = { @@ -301,7 +301,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) goto err1; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 6c65a1a2de79..f55e45b52fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -26,7 +26,7 @@ #include "amdgpu_pm.h" #include "amdgpu_dpm.h" #include "amdgpu_atombios.h" -#include "si/sid.h" +#include "sid.h" #include "r600_dpm.h" #include "si_dpm.h" #include "atom.h" @@ -56,7 +56,6 @@ #define BIOS_SCRATCH_4 0x5cd MODULE_FIRMWARE("radeon/tahiti_smc.bin"); -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); 
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); MODULE_FIRMWARE("radeon/verde_smc.bin"); @@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; @@ -3009,29 +3009,6 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev) return ret; } -struct si_dpm_quirk { - u32 chip_vendor; - u32 chip_device; - u32 subsys_vendor; - u32 subsys_device; - u32 max_sclk; - u32 max_mclk; -}; - -/* cards with dpm stability problems */ -static struct si_dpm_quirk si_dpm_quirk_list[] = { - /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ - { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, - { 0, 0, 0, 0 }, -}; - static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev, u16 vce_voltage) { @@ -3477,42 +3454,8 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk = 0, max_mclk = 0; int i; - struct si_dpm_quirk *p = si_dpm_quirk_list; - /* limit all SI kickers */ - if (adev->asic_type == CHIP_PITCAIRN) { - if ((adev->pdev->revision == 0x81) || - (adev->pdev->device == 0x6810) || - (adev->pdev->device == 0x6811) || - (adev->pdev->device == 0x6816) || - (adev->pdev->device == 0x6817) || - (adev->pdev->device == 0x6806)) - max_mclk = 120000; - } else if (adev->asic_type == CHIP_VERDE) { - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6820) || - (adev->pdev->device == 0x6821) || - (adev->pdev->device == 0x6822) || - (adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682A) || - (adev->pdev->device == 0x682B)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (adev->asic_type == CHIP_HAINAN) { + if (adev->asic_type == CHIP_HAINAN) { if ((adev->pdev->revision == 0x81) || (adev->pdev->revision == 0x83) || (adev->pdev->revision == 0xC3) || @@ -3520,20 +3463,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, (adev->pdev->device == 0x6665) || (adev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; - } - } - /* Apply dpm quirks */ - while (p && p->chip_device != 0) { - if (adev->pdev->vendor == p->chip_vendor && - adev->pdev->device == p->chip_device && - adev->pdev->subsystem_vendor == p->subsys_vendor && - adev->pdev->subsystem_device == p->subsys_device) { - max_sclk = p->max_sclk; - max_mclk = p->max_mclk; - break; } - ++p; } if (rps->vce_active) { @@ -3931,25 +3861,25 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad } static int 
si_dpm_force_performance_level(struct amdgpu_device *adev, - enum amdgpu_dpm_forced_level level) + enum amd_dpm_forced_level level) { struct amdgpu_ps *rps = adev->pm.dpm.current_ps; struct si_ps *ps = si_get_ps(rps); u32 levels = ps->performance_level_count; - if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) { + if (level == AMD_DPM_FORCED_LEVEL_HIGH) { if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) return -EINVAL; if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) return -EINVAL; - } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) { + } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) return -EINVAL; if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) return -EINVAL; - } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) { + } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) return -EINVAL; @@ -7687,50 +7617,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) chip_name = "tahiti"; break; case CHIP_PITCAIRN: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->device == 0x6810) || - (adev->pdev->device == 0x6811) || - (adev->pdev->device == 0x6816) || - (adev->pdev->device == 0x6817) || - (adev->pdev->device == 0x6806)) + if ((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811))) chip_name = "pitcairn_k"; else chip_name = "pitcairn"; break; case CHIP_VERDE: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6820) || - (adev->pdev->device == 0x6821) || - (adev->pdev->device == 0x6822) || - (adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682A) || - (adev->pdev->device == 0x682B)) + if (((adev->pdev->device == 0x6820) && + ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83))) || + ((adev->pdev->device == 0x6821) && + ((adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87))) || + ((adev->pdev->revision == 0x87) && + ((adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682b)))) chip_name = "verde_k"; else chip_name = "verde"; break; case CHIP_OLAND: - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) + if (((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6600) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605) || + (adev->pdev->device == 0x6610))) || + ((adev->pdev->revision == 0x83) && + (adev->pdev->device == 0x6610))) chip_name = "oland_k"; else chip_name = "oland"; break; case CHIP_HAINAN: - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6664) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)) + if (((adev->pdev->revision == 0x81) && + (adev->pdev->device == 0x6660)) || + ((adev->pdev->revision == 0x83) && + ((adev->pdev->device == 0x6660) || + (adev->pdev->device == 0x6663) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)))) chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device 
== 0x6665)) + chip_name = "banks_k_2"; else chip_name = "hainan"; break; @@ -7770,7 +7701,7 @@ static int si_dpm_sw_init(void *handle) /* default to balanced state */ adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; + adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; adev->pm.default_sclk = adev->clock.default_sclk; adev->pm.default_mclk = adev->clock.default_mclk; adev->pm.current_sclk = adev->clock.default_sclk; @@ -8096,11 +8027,3 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; } -const struct amdgpu_ip_block_version si_dpm_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 6, - .minor = 0, - .rev = 0, - .funcs = &si_dpm_ip_funcs, -}; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index db0f36846661..81f90800ba73 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -23,7 +23,7 @@ #include "drmP.h" #include "amdgpu.h" #include "amdgpu_ih.h" -#include "si/sid.h" +#include "sid.h" #include "si_ih.h" static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c index 668ba99d6c05..0726bc3b6f90 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c @@ -25,7 +25,7 @@ #include <linux/firmware.h> #include "drmP.h" #include "amdgpu.h" -#include "si/sid.h" +#include "sid.h" #include "ppsmc.h" #include "amdgpu_ucode.h" #include "sislands_smc.h" diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h index c57eff159374..c57eff159374 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h +++ b/drivers/gpu/drm/amd/amdgpu/sid.h diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h deleted file mode 100644 index 880152c0f775..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef SMU_UCODE_XFER_VI_H -#define SMU_UCODE_XFER_VI_H - -#define SMU_DRAMData_TOC_VERSION 1 -#define MAX_IH_REGISTER_COUNT 65535 -#define SMU_DIGEST_SIZE_BYTES 20 -#define SMU_FB_SIZE_BYTES 1048576 -#define SMU_MAX_ENTRIES 12 - -#define UCODE_ID_SMU 0 -#define UCODE_ID_SDMA0 1 -#define UCODE_ID_SDMA1 2 -#define UCODE_ID_CP_CE 3 -#define UCODE_ID_CP_PFP 4 -#define UCODE_ID_CP_ME 5 -#define UCODE_ID_CP_MEC 6 -#define UCODE_ID_CP_MEC_JT1 7 -#define UCODE_ID_CP_MEC_JT2 8 -#define UCODE_ID_GMCON_RENG 9 -#define UCODE_ID_RLC_G 10 -#define UCODE_ID_IH_REG_RESTORE 11 -#define UCODE_ID_VBIOS 12 -#define UCODE_ID_MISC_METADATA 13 -#define UCODE_ID_SMU_SK 14 -#define UCODE_ID_RLC_SCRATCH 32 -#define UCODE_ID_RLC_SRM_ARAM 33 -#define UCODE_ID_RLC_SRM_DRAM 34 -#define UCODE_ID_MEC_STORAGE 35 -#define UCODE_ID_VBIOS_PARAMETERS 36 -#define UCODE_META_DATA 0xFF - -#define UCODE_ID_SMU_MASK 0x00000001 -#define UCODE_ID_SDMA0_MASK 0x00000002 -#define UCODE_ID_SDMA1_MASK 0x00000004 -#define UCODE_ID_CP_CE_MASK 0x00000008 -#define UCODE_ID_CP_PFP_MASK 0x00000010 -#define UCODE_ID_CP_ME_MASK 0x00000020 -#define UCODE_ID_CP_MEC_MASK 0x00000040 -#define UCODE_ID_CP_MEC_JT1_MASK 0x00000080 -#define UCODE_ID_CP_MEC_JT2_MASK 0x00000100 -#define UCODE_ID_GMCON_RENG_MASK 0x00000200 -#define UCODE_ID_RLC_G_MASK 0x00000400 -#define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800 -#define UCODE_ID_VBIOS_MASK 0x00001000 - -#define UCODE_FLAG_UNHALT_MASK 0x1 - -struct SMU_Entry { -#ifndef __BIG_ENDIAN - uint16_t id; - uint16_t version; - uint32_t image_addr_high; - uint32_t image_addr_low; - uint32_t meta_data_addr_high; - uint32_t meta_data_addr_low; - uint32_t data_size_byte; - uint16_t flags; - uint16_t num_register_entries; -#else - uint16_t version; - uint16_t id; - uint32_t image_addr_high; - uint32_t image_addr_low; - uint32_t meta_data_addr_high; - uint32_t meta_data_addr_low; - uint32_t data_size_byte; - uint16_t num_register_entries; - uint16_t flags; -#endif -}; - -struct SMU_DRAMData_TOC { - uint32_t structure_version; - uint32_t num_entries; - struct SMU_Entry entry[SMU_MAX_ENTRIES]; -}; - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 96444e4d862a..7fb9137dd89b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -40,13 +40,14 @@ #include "smu/smu_7_0_1_sh_mask.h" static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); -static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); static int uvd_v4_2_start(struct amdgpu_device *adev); static void uvd_v4_2_stop(struct amdgpu_device *adev); static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state); +static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, + bool sw_mode); /** * uvd_v4_2_ring_get_rptr - get read pointer * @@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle) return r; } - +static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, + bool enable); /** * uvd_v4_2_hw_init - start and test UVD block * @@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle) uint32_t tmp; int r; - uvd_v4_2_init_cg(adev); - uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE); + uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); r = uvd_v4_2_start(adev); if (r) @@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) struct 
amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz; int i, j, r; - /* disable byte swapping */ u32 lmi_swap_cntl = 0; u32 mp_swap_cntl = 0; + WREG32(mmUVD_CGC_GATE, 0); + uvd_v4_2_set_dcm(adev, true); + uvd_v4_2_mc_resume(adev); /* disable interupt */ @@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + uvd_v4_2_set_dcm(adev, false); } /** @@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); } -static void uvd_v4_2_init_cg(struct amdgpu_device *adev) -{ - bool hw_mode = true; - - if (hw_mode) { - uvd_v4_2_set_dcm(adev, false); - } else { - u32 tmp = RREG32(mmUVD_CGC_CTRL); - tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, tmp); - } -} - static bool uvd_v4_2_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, static int uvd_v4_2_set_clockgating_state(void *handle, enum amd_clockgating_state state) { - bool gate = false; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - - if (state == AMD_CG_STATE_GATE) - gate = true; - - uvd_v4_2_enable_mgcg(adev, gate); - return 0; } @@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index a79e283590fb..9b49824233ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; - static int curstate = -1; if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; - - curstate = state; if (enable) { /* wait for STATUS to clear */ if (uvd_v5_0_wait_for_idle(handle)) @@ -827,16 +822,44 @@ static int uvd_v5_0_set_powergating_state(void *handle, * the smc and the hw blocks */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); - return 0; + adev->uvd.is_powergated = true; } else { - return uvd_v5_0_start(adev); + ret = uvd_v5_0_start(adev); + if (ret) + goto out; + adev->uvd.is_powergated = false; + } + +out: + return ret; +} + +static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + mutex_lock(&adev->pm.mutex); + + if (adev->uvd.is_powergated) { + DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); + goto out; } + + /* AMD_CG_SUPPORT_UVD_MGCG */ + data = RREG32(mmUVD_CGC_CTRL); + if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK) + *flags |= AMD_CG_SUPPORT_UVD_MGCG; + +out: + mutex_unlock(&adev->pm.mutex); } static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { @@ -854,6 +877,7 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { .soft_reset = uvd_v5_0_soft_reset, .set_clockgating_state = uvd_v5_0_set_clockgating_state, .set_powergating_state = uvd_v5_0_set_powergating_state, + .get_clockgating_state = uvd_v5_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ba0bbf7138dc..de7e03544d00 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1047,6 +1047,7 @@ static int uvd_v6_0_set_powergating_state(void *handle, * the smc and the hw blocks */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; @@ -1055,10 +1056,37 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); - return 0; + adev->uvd.is_powergated = true; } else { - return uvd_v6_0_start(adev); + ret = uvd_v6_0_start(adev); + if (ret) + goto out; + adev->uvd.is_powergated = false; + } + +out: + return ret; +} + +static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + mutex_lock(&adev->pm.mutex); + + if (adev->uvd.is_powergated) { + DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); + goto out; } + + /* AMD_CG_SUPPORT_UVD_MGCG */ + data = RREG32(mmUVD_CGC_CTRL); + if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK) + *flags |= AMD_CG_SUPPORT_UVD_MGCG; + +out: + mutex_unlock(&adev->pm.mutex); } static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { @@ -1079,6 +1107,7 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .post_soft_reset = uvd_v6_0_post_soft_reset, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, + .get_clockgating_state = uvd_v6_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6b3293a1c7b8..8db26559fd1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,9 +43,13 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 + #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) @@ -54,6 +58,9 @@ #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ + | GRBM_GFX_INDEX__VCE_ALL_PIPE) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, WREG32(mmVCE_UENC_CLOCK_GATING_2, data); data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - data &= ~0xffc00000; + data &= ~0x3ff; WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); @@ -223,6 +230,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) struct amdgpu_ring *ring; int idx, r; + vce_v3_0_override_vce_clock_gating(adev, true); + if (!(adev->flags & AMD_IS_APU)) + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + ring = &adev->vce.ring[0]; WREG32(mmVCE_RB_RPTR, ring->wptr); WREG32(mmVCE_RB_WPTR, ring->wptr); @@ -249,7 +260,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); vce_v3_0_mc_resume(adev, idx); WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); @@ -273,7 +284,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -288,7 +299,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); @@ -306,7 +317,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -320,11 +331,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) { u32 tmp; - /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */ + /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */ if ((adev->asic_type == CHIP_FIJI) || (adev->asic_type == CHIP_STONEY) || (adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_POLARIS11)) + (adev->asic_type == CHIP_POLARIS11) || + (adev->asic_type == CHIP_POLARIS12)) return AMDGPU_VCE_HARVEST_VCE1; /* Tonga and CZ are dual or single pipe */ @@ -585,17 +597,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) * VCE team suggest use bit 3--bit 6 for busy status check */ mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, 
SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { @@ -700,18 +712,6 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static void vce_v3_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - else - tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -719,11 +719,6 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? true : false; int i; - if ((adev->asic_type == CHIP_POLARIS10) || - (adev->asic_type == CHIP_TONGA) || - (adev->asic_type == CHIP_FIJI)) - vce_v3_0_set_bypass_mode(adev, enable); - if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; @@ -733,7 +728,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -752,7 +747,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -769,15 +764,46 @@ static int vce_v3_0_set_powergating_state(void *handle, * the smc and the hw blocks */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) return 0; - if (state == AMD_PG_STATE_GATE) + if (state == AMD_PG_STATE_GATE) { + adev->vce.is_powergated = true; /* XXX do we need a vce_v3_0_stop()? 
*/ - return 0; - else - return vce_v3_0_start(adev); + } else { + ret = vce_v3_0_start(adev); + if (ret) + goto out; + adev->vce.is_powergated = false; + } + +out: + return ret; +} + +static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + mutex_lock(&adev->pm.mutex); + + if (adev->vce.is_powergated) { + DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); + goto out; + } + + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + + /* AMD_CG_SUPPORT_VCE_MGCG */ + data = RREG32(mmVCE_CLOCK_GATING_A); + if (data & (0x04 << 4)) + *flags |= AMD_CG_SUPPORT_VCE_MGCG; + +out: + mutex_unlock(&adev->pm.mutex); } static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, @@ -831,6 +857,7 @@ static const struct amd_ip_funcs vce_v3_0_ip_funcs = { .post_soft_reset = vce_v3_0_post_soft_reset, .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, + .get_clockgating_state = vce_v3_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index bf088d6d9bf1..4922fff08c3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -20,9 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ -#include <linux/firmware.h> #include <linux/slab.h> -#include <linux/module.h> #include "drmP.h" #include "amdgpu.h" #include "amdgpu_atombios.h" @@ -78,16 +76,7 @@ #include "amdgpu_acp.h" #endif #include "dce_virtual.h" - -MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); -MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); -MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); -MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); -MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); +#include "mxgpu_vi.h" /* * Indirect registers accessor @@ -284,6 +273,12 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) /* Some of the registers might be dependent on GRBM_GFX_INDEX */ mutex_lock(&adev->grbm_idx_mutex); + if (amdgpu_sriov_vf(adev)) { + xgpu_vi_init_golden_registers(adev); + mutex_unlock(&adev->grbm_idx_mutex); + return; + } + switch (adev->asic_type) { case CHIP_TOPAZ: amdgpu_program_register_sequence(adev, @@ -312,6 +307,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: default: break; } @@ -456,14 +452,14 @@ static void vi_detect_hw_virtualization(struct amdgpu_device *adev) /* bit0: 0 means pf and 1 means vf */ /* bit31: 0 means disable IOV and 1 means enable */ if (reg & 1) - adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF; + adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF; if (reg & 0x80000000) - adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; if (reg == 0) { if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ - adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } } @@ -671,6 +667,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, case CHIP_TONGA: case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: case CHIP_CARRIZO: case CHIP_STONEY: asic_register_table = cz_allowed_read_registers; @@ -798,7 
+795,37 @@ static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) { - /* todo */ + int r, i; + struct atom_clock_dividers dividers; + u32 tmp; + + r = amdgpu_atombios_get_clock_dividers(adev, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + ecclk, false, &dividers); + if (r) + return r; + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; + + tmp = RREG32_SMC(ixCG_ECLK_CNTL); + tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | + CG_ECLK_CNTL__ECLK_DIVIDER_MASK); + tmp |= dividers.post_divider; + WREG32_SMC(ixCG_ECLK_CNTL, tmp); + + for (i = 0; i < 100; i++) { + if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + break; + mdelay(10); + } + if (i == 100) + return -ETIMEDOUT; return 0; } @@ -866,7 +893,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, .read_bios_from_rom = &vi_read_bios_from_rom, - .detect_hw_virtualization = vi_detect_hw_virtualization, .read_register = &vi_read_register, .reset = &vi_asic_reset, .set_vga_state = &vi_vga_set_state, @@ -902,6 +928,11 @@ static int vi_common_early_init(void *handle) (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) smc_enabled = true; + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_setting(adev); + xgpu_vi_mailbox_set_irq_funcs(adev); + } + adev->rev_id = vi_get_rev_id(adev); adev->external_rev_id = 0xFF; switch (adev->asic_type) { @@ -994,6 +1025,11 @@ static int vi_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x50; break; + case CHIP_POLARIS12: + adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x64; + break; case CHIP_CARRIZO: adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_GFX_MGCG | @@ -1053,10 +1089,6 @@ static int vi_common_early_init(void *handle) return -EINVAL; } - /* in early init stage, vbios code won't work */ - if (adev->asic_funcs->detect_hw_virtualization) - amdgpu_asic_detect_hw_virtualization(adev); - if (amdgpu_smc_load_fw && smc_enabled) adev->firmware.smu_load = true; @@ -1065,8 +1097,23 @@ return 0; } +static int vi_common_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_vi_mailbox_get_irq(adev); + + return 0; +} + static int vi_common_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_vi_mailbox_add_irq_id(adev); + return 0; } @@ -1098,6 +1145,9 @@ static int vi_common_hw_fini(void *handle) /* enable the doorbell aperture */ vi_enable_doorbell_aperture(adev, false); + if (amdgpu_sriov_vf(adev)) + xgpu_vi_mailbox_put_irq(adev); + return 0; } @@ -1182,6 +1232,23 @@ static void vi_update_hdp_light_sleep(struct amdgpu_device *adev, WREG32(mmHDP_MEM_POWER_LS, data); } +static void vi_update_drm_light_sleep(struct amdgpu_device *adev, + bool enable) +{ + uint32_t temp, data; + + temp = data = RREG32(0x157a); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS)) + data |= 1; + else + data &= ~1; + + if (temp != data) + WREG32(0x157a, data); +} + + static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -1342,10 +1409,13 @@ static int vi_common_set_clockgating_state(void *handle, state ==
AMD_CG_STATE_GATE ? true : false); vi_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); + vi_update_drm_light_sleep(adev, + state == AMD_CG_STATE_GATE ? true : false); break; case CHIP_TONGA: case CHIP_POLARIS10: case CHIP_POLARIS11: + case CHIP_POLARIS12: vi_common_set_clockgating_state_by_smu(adev, state); default: break; @@ -1359,10 +1429,36 @@ static int vi_common_set_powergating_state(void *handle, return 0; } +static void vi_common_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_BIF_LS */ + data = RREG32_PCIE(ixPCIE_CNTL2); + if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_BIF_LS; + + /* AMD_CG_SUPPORT_HDP_LS */ + data = RREG32(mmHDP_MEM_POWER_LS); + if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK) + *flags |= AMD_CG_SUPPORT_HDP_LS; + + /* AMD_CG_SUPPORT_HDP_MGCG */ + data = RREG32(mmHDP_HOST_PATH_CNTL); + if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK)) + *flags |= AMD_CG_SUPPORT_HDP_MGCG; + + /* AMD_CG_SUPPORT_ROM_MGCG */ + data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0); + if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_ROM_MGCG; +} + static const struct amd_ip_funcs vi_common_ip_funcs = { .name = "vi_common", .early_init = vi_common_early_init, - .late_init = NULL, + .late_init = vi_common_late_init, .sw_init = vi_common_sw_init, .sw_fini = vi_common_sw_fini, .hw_init = vi_common_hw_init, @@ -1374,6 +1470,7 @@ static const struct amd_ip_funcs vi_common_ip_funcs = { .soft_reset = vi_common_soft_reset, .set_clockgating_state = vi_common_set_clockgating_state, .set_powergating_state = vi_common_set_powergating_state, + .get_clockgating_state = vi_common_get_clockgating_state, }; static const struct amdgpu_ip_block_version vi_common_ip_block = @@ -1387,6 +1484,12 @@ static const struct amdgpu_ip_block_version vi_common_ip_block = int vi_set_ip_blocks(struct amdgpu_device *adev) { + /* in early init stage, vbios code won't work */ + vi_detect_hw_virtualization(adev); + + if (amdgpu_sriov_vf(adev)) + adev->virt.ops = &xgpu_vi_virt_ops; + switch (adev->asic_type) { case CHIP_TOPAZ: /* topaz has no DCE, UVD, VCE */ @@ -1404,31 +1507,36 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block); amdgpu_ip_block_add(adev, &tonga_ih_ip_block); amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); - if (adev->enable_virtual_display) + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_ip_block_add(adev, &dce_virtual_ip_block); else amdgpu_ip_block_add(adev, &dce_v10_1_ip_block); amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); - amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + if (!amdgpu_sriov_vf(adev)) { + amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); + amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + } break; case CHIP_TONGA: amdgpu_ip_block_add(adev, &vi_common_ip_block); amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); amdgpu_ip_block_add(adev, &tonga_ih_ip_block); amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); - if (adev->enable_virtual_display) + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_ip_block_add(adev, &dce_virtual_ip_block); else amdgpu_ip_block_add(adev, &dce_v10_0_ip_block); amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); - amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); - 
amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + if (!amdgpu_sriov_vf(adev)) { + amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); + amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + } break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: amdgpu_ip_block_add(adev, &vi_common_ip_block); amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); amdgpu_ip_block_add(adev, &tonga_ih_ip_block); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 575d7aed5d32..719587b8b0cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -28,4 +28,116 @@ void vi_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int vi_set_ip_blocks(struct amdgpu_device *adev); +struct amdgpu_ce_ib_state +{ + uint32_t ce_ib_completion_status; + uint32_t ce_constegnine_count; + uint32_t ce_ibOffset_ib1; + uint32_t ce_ibOffset_ib2; +}; /* Total of 4 DWORD */ + +struct amdgpu_de_ib_state +{ + uint32_t ib_completion_status; + uint32_t de_constEngine_count; + uint32_t ib_offset_ib1; + uint32_t ib_offset_ib2; + uint32_t preamble_begin_ib1; + uint32_t preamble_begin_ib2; + uint32_t preamble_end_ib1; + uint32_t preamble_end_ib2; + uint32_t draw_indirect_baseLo; + uint32_t draw_indirect_baseHi; + uint32_t disp_indirect_baseLo; + uint32_t disp_indirect_baseHi; + uint32_t gds_backup_addrlo; + uint32_t gds_backup_addrhi; + uint32_t index_base_addrlo; + uint32_t index_base_addrhi; + uint32_t sample_cntl; +}; /* Total of 17 DWORD */ + +struct amdgpu_ce_ib_state_chained_ib +{ + /* section of non chained ib part */ + uint32_t ce_ib_completion_status; + uint32_t ce_constegnine_count; + uint32_t ce_ibOffset_ib1; + uint32_t ce_ibOffset_ib2; + + /* section of chained ib */ + uint32_t ce_chainib_addrlo_ib1; + uint32_t ce_chainib_addrlo_ib2; + uint32_t ce_chainib_addrhi_ib1; + uint32_t ce_chainib_addrhi_ib2; + uint32_t ce_chainib_size_ib1; + uint32_t ce_chainib_size_ib2; +}; /* total 10 DWORD */ + +struct amdgpu_de_ib_state_chained_ib +{ + /* section of non chained ib part */ + uint32_t ib_completion_status; + uint32_t de_constEngine_count; + uint32_t ib_offset_ib1; + uint32_t ib_offset_ib2; + + /* section of chained ib */ + uint32_t chain_ib_addrlo_ib1; + uint32_t chain_ib_addrlo_ib2; + uint32_t chain_ib_addrhi_ib1; + uint32_t chain_ib_addrhi_ib2; + uint32_t chain_ib_size_ib1; + uint32_t chain_ib_size_ib2; + + /* section of non chained ib part */ + uint32_t preamble_begin_ib1; + uint32_t preamble_begin_ib2; + uint32_t preamble_end_ib1; + uint32_t preamble_end_ib2; + + /* section of chained ib */ + uint32_t chain_ib_pream_addrlo_ib1; + uint32_t chain_ib_pream_addrlo_ib2; + uint32_t chain_ib_pream_addrhi_ib1; + uint32_t chain_ib_pream_addrhi_ib2; + + /* section of non chained ib part */ + uint32_t draw_indirect_baseLo; + uint32_t draw_indirect_baseHi; + uint32_t disp_indirect_baseLo; + uint32_t disp_indirect_baseHi; + uint32_t gds_backup_addrlo; + uint32_t gds_backup_addrhi; + uint32_t index_base_addrlo; + uint32_t index_base_addrhi; + uint32_t sample_cntl; +}; /* Total of 27 DWORD */ + +struct amdgpu_gfx_meta_data +{ + /* 4 DWORD, address must be 4KB aligned */ + struct amdgpu_ce_ib_state ce_payload; + uint32_t reserved1[60]; + /* 17 DWORD, address must be 64B aligned */ + struct amdgpu_de_ib_state de_payload; + /* PFP IB base address which get pre-empted */ + uint32_t DeIbBaseAddrLo; + uint32_t DeIbBaseAddrHi; + uint32_t reserved2[941]; +}; /* Total of 4K Bytes */ + +struct amdgpu_gfx_meta_data_chained_ib +{ + /* 10 DWORD, address must be 4KB aligned 
*/ + struct amdgpu_ce_ib_state_chained_ib ce_payload; + uint32_t reserved1[54]; + /* 27 DWORD, address must be 64B aligned */ + struct amdgpu_de_ib_state_chained_ib de_payload; + /* PFP IB base address which get pre-empted */ + uint32_t DeIbBaseAddrLo; + uint32_t DeIbBaseAddrHi; + uint32_t reserved2[931]; +}; /* Total of 4K Bytes */ + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h index fc120ba18aad..c43e03fddfba 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h @@ -29,8 +29,4 @@ int cz_smu_init(struct amdgpu_device *adev); int cz_smu_start(struct amdgpu_device *adev); int cz_smu_fini(struct amdgpu_device *adev); -extern const struct amd_ip_funcs tonga_dpm_ip_funcs; -extern const struct amd_ip_funcs fiji_dpm_ip_funcs; -extern const struct amd_ip_funcs iceland_dpm_ip_funcs; - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 11746f22d0c5..7a3863a45f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -360,6 +360,8 @@ #define PACKET3_WAIT_ON_CE_COUNTER 0x86 #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SWITCH_BUFFER 0x8B +#define PACKET3_SET_RESOURCES 0xA0 +#define PACKET3_MAP_QUEUES 0xA2 #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ee3e04e10dae..6316aad43a73 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -486,7 +486,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, return status; } -static int kfd_ioctl_dbg_unrgesiter(struct file *filep, +static int kfd_ioctl_dbg_unregister(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_dbg_unregister_args *args = data; @@ -498,7 +498,7 @@ static int kfd_ioctl_dbg_unrgesiter(struct file *filep, return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { - pr_debug("kfd_ioctl_dbg_unrgesiter not supported on CZ\n"); + pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); return -EINVAL; } @@ -892,7 +892,7 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { kfd_ioctl_dbg_register, 0), AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER, - kfd_ioctl_dbg_unrgesiter, 0), + kfd_ioctl_dbg_unregister, 0), AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH, kfd_ioctl_dbg_address_watch, 0), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index a6a4b2b1c0d9..6a3470f84998 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p, struct kfd_event_data event_data; if (copy_from_user(&event_data, &events[i], - sizeof(struct kfd_event_data))) + sizeof(struct kfd_event_data))) { + ret = -EFAULT; goto fail; + } ret = init_event_waiter(p, &event_waiters[i], event_data.event_id, i); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index c02469ada9f1..43f45adeccd1 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -23,7 +23,7 @@ #ifndef __AMD_SHARED_H__ #define __AMD_SHARED_H__ -#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ +#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */ /* * Supported ASIC types @@ -46,6 +46,7 @@ enum amd_asic_type { CHIP_STONEY, CHIP_POLARIS10, CHIP_POLARIS11, + CHIP_POLARIS12, CHIP_LAST, }; @@ -79,6 
+80,18 @@ enum amd_clockgating_state { AMD_CG_STATE_UNGATE, }; +enum amd_dpm_forced_level { + AMD_DPM_FORCED_LEVEL_AUTO = 0x1, + AMD_DPM_FORCED_LEVEL_MANUAL = 0x2, + AMD_DPM_FORCED_LEVEL_LOW = 0x4, + AMD_DPM_FORCED_LEVEL_HIGH = 0x8, + AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD = 0x10, + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK = 0x20, + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK = 0x40, + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK = 0x80, + AMD_DPM_FORCED_LEVEL_PROFILE_EXIT = 0x100, +}; + enum amd_powergating_state { AMD_PG_STATE_GATE = 0, AMD_PG_STATE_UNGATE, @@ -205,6 +218,8 @@ struct amd_ip_funcs { /* enable/disable pg for the IP block */ int (*set_powergating_state)(void *handle, enum amd_powergating_state state); + /* get current clockgating status */ + void (*get_clockgating_state)(void *handle, u32 *flags); }; #endif /* __AMD_SHARED_H__ */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h index 95570dbd18bb..813957a17a2d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_d.h @@ -4552,6 +4552,14 @@ #define mmDP4_DP_DPHY_PRBS_CNTL 0x4eb5 #define mmDP5_DP_DPHY_PRBS_CNTL 0x4fb5 #define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5 +#define mmDP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6 +#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6 +#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6 +#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6 +#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6 +#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6 #define mmDP_DPHY_CRC_EN 0x4ab7 #define mmDP0_DP_DPHY_CRC_EN 0x4ab7 #define mmDP1_DP_DPHY_CRC_EN 0x4bb7 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h index 8a75eb9d732b..c755f43aaaf8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h @@ -8690,6 +8690,10 @@ #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8 #define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1 #define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0 #define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h index c39234ecedd0..6df651a94b0a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h @@ -4544,6 +4544,15 @@ #define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5 #define mmDP7_DP_DPHY_PRBS_CNTL 0x56b5 #define mmDP8_DP_DPHY_PRBS_CNTL 0x57b5 +#define mmDP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6 +#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6 +#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6 +#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6 +#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6 +#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6 +#define mmDP8_DP_DPHY_SCRAM_CNTL 0x56b6 #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4adc #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4adc #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4bdc diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h index a438c2b6e280..14a3bacfcfd1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h @@ -6004,6 +6004,8 @@ #define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc #define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1 #define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0 +#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2 +#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1 #define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4 #define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2 #define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8 @@ -8364,6 +8366,10 @@ #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8 #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0 #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h index 09a7df17570d..367b191d49fb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h @@ -5776,6 +5776,15 @@ #define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5 #define mmDP7_DP_DPHY_PRBS_CNTL 0x56b5 #define mmDP8_DP_DPHY_PRBS_CNTL 0x57b5 +#define mmDP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP0_DP_DPHY_SCRAM_CNTL 0x4ab6 +#define mmDP1_DP_DPHY_SCRAM_CNTL 0x4bb6 +#define mmDP2_DP_DPHY_SCRAM_CNTL 0x4cb6 +#define mmDP3_DP_DPHY_SCRAM_CNTL 0x4db6 +#define mmDP4_DP_DPHY_SCRAM_CNTL 0x4eb6 +#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4fb6 +#define mmDP6_DP_DPHY_SCRAM_CNTL 0x54b6 +#define mmDP8_DP_DPHY_SCRAM_CNTL 0x56b6 #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4adc #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4adc #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4bdc diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h index 1ddc4183a1c9..106094ed0661 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h @@ -7088,6 +7088,8 @@ #define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc #define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1 #define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0 +#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x2 +#define HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1 #define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4 #define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2 #define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8 @@ -9626,6 +9628,10 @@ #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00 +#define 
DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8 #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0 #define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h index d3ccf5a86de0..93d84a475134 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_d.h @@ -3920,6 +3920,14 @@ #define mmDP4_DP_DPHY_PRBS_CNTL 0x48d4 #define mmDP5_DP_DPHY_PRBS_CNTL 0x4bd4 #define mmDP6_DP_DPHY_PRBS_CNTL 0x4ed4 +#define mmDP_DPHY_SCRAM_CNTL 0x1cd5 +#define mmDP0_DP_DPHY_SCRAM_CNTL 0x1cd5 +#define mmDP1_DP_DPHY_SCRAM_CNTL 0x1fd5 +#define mmDP2_DP_DPHY_SCRAM_CNTL 0x42d5 +#define mmDP3_DP_DPHY_SCRAM_CNTL 0x45d5 +#define mmDP4_DP_DPHY_SCRAM_CNTL 0x48d5 +#define mmDP5_DP_DPHY_SCRAM_CNTL 0x4bd5 +#define mmDP6_DP_DPHY_SCRAM_CNTL 0x4ed5 #define mmDP_DPHY_CRC_EN 0x1cd6 #define mmDP0_DP_DPHY_CRC_EN 0x1cd6 #define mmDP1_DP_DPHY_CRC_EN 0x1fd6 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h index c331c9fe7b81..9b6825b74cc1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h @@ -9214,6 +9214,10 @@ #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00 #define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x10 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x3ff00 +#define DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8 #define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1 #define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0 #define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10 diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h b/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h deleted file mode 100644 index 895c8e2353e3..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/si/si_reg.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2010 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
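The DP_DPHY_SCRAM_CNTL definitions added to the DCE 8/10/11 register headers above follow the usual asic_reg convention: each field has a _MASK/__SHIFT pair, and the field is programmed by clearing the mask bits and or-ing in the new value shifted into place. A minimal sketch of that read-modify-write pattern; reg_read() and reg_write() are hypothetical stand-ins, not the driver's actual RREG32/WREG32 accessors:

/* Sketch: program the 10-bit DPHY_SCRAMBLER_BS_COUNT field at bit 8
 * of DP_DPHY_SCRAM_CNTL using its mask/shift pair. The register
 * accessors are hypothetical placeholders. */
static void dp_set_scrambler_bs_count(u32 bs_count)
{
	u32 val = reg_read(mmDP_DPHY_SCRAM_CNTL);	/* hypothetical accessor */

	val &= ~DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK;
	val |= (bs_count << DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT) &
	       DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK;
	reg_write(mmDP_DPHY_SCRAM_CNTL, val);		/* hypothetical accessor */
}

Masking the shifted value again before the or keeps an out-of-range bs_count from clobbering neighbouring fields.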
- * - * Authors: Alex Deucher - */ -#ifndef __SI_REG_H__ -#define __SI_REG_H__ - -/* SI */ -#define SI_DC_GPIO_HPD_MASK 0x196c -#define SI_DC_GPIO_HPD_A 0x196d -#define SI_DC_GPIO_HPD_EN 0x196e -#define SI_DC_GPIO_HPD_Y 0x196f - -#define SI_GRPH_CONTROL 0x1a01 -# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0) -# define SI_GRPH_DEPTH_8BPP 0 -# define SI_GRPH_DEPTH_16BPP 1 -# define SI_GRPH_DEPTH_32BPP 2 -# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) -# define SI_ADDR_SURF_2_BANK 0 -# define SI_ADDR_SURF_4_BANK 1 -# define SI_ADDR_SURF_8_BANK 2 -# define SI_ADDR_SURF_16_BANK 3 -# define SI_GRPH_Z(x) (((x) & 0x3) << 4) -# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) -# define SI_ADDR_SURF_BANK_WIDTH_1 0 -# define SI_ADDR_SURF_BANK_WIDTH_2 1 -# define SI_ADDR_SURF_BANK_WIDTH_4 2 -# define SI_ADDR_SURF_BANK_WIDTH_8 3 -# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8) -/* 8 BPP */ -# define SI_GRPH_FORMAT_INDEXED 0 -/* 16 BPP */ -# define SI_GRPH_FORMAT_ARGB1555 0 -# define SI_GRPH_FORMAT_ARGB565 1 -# define SI_GRPH_FORMAT_ARGB4444 2 -# define SI_GRPH_FORMAT_AI88 3 -# define SI_GRPH_FORMAT_MONO16 4 -# define SI_GRPH_FORMAT_BGRA5551 5 -/* 32 BPP */ -# define SI_GRPH_FORMAT_ARGB8888 0 -# define SI_GRPH_FORMAT_ARGB2101010 1 -# define SI_GRPH_FORMAT_32BPP_DIG 2 -# define SI_GRPH_FORMAT_8B_ARGB2101010 3 -# define SI_GRPH_FORMAT_BGRA1010102 4 -# define SI_GRPH_FORMAT_8B_BGRA1010102 5 -# define SI_GRPH_FORMAT_RGB111110 6 -# define SI_GRPH_FORMAT_BGR101111 7 -# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) -# define SI_ADDR_SURF_BANK_HEIGHT_1 0 -# define SI_ADDR_SURF_BANK_HEIGHT_2 1 -# define SI_ADDR_SURF_BANK_HEIGHT_4 2 -# define SI_ADDR_SURF_BANK_HEIGHT_8 3 -# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) -# define SI_ADDR_SURF_TILE_SPLIT_64B 0 -# define SI_ADDR_SURF_TILE_SPLIT_128B 1 -# define SI_ADDR_SURF_TILE_SPLIT_256B 2 -# define SI_ADDR_SURF_TILE_SPLIT_512B 3 -# define SI_ADDR_SURF_TILE_SPLIT_1KB 4 -# define SI_ADDR_SURF_TILE_SPLIT_2KB 5 -# define SI_ADDR_SURF_TILE_SPLIT_4KB 6 -# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) -# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0 -# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1 -# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2 -# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3 -# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) -# define SI_GRPH_ARRAY_LINEAR_GENERAL 0 -# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1 -# define SI_GRPH_ARRAY_1D_TILED_THIN1 2 -# define SI_GRPH_ARRAY_2D_TILED_THIN1 4 -# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24) -# define SI_ADDR_SURF_P2 0 -# define SI_ADDR_SURF_P4_8x16 4 -# define SI_ADDR_SURF_P4_16x16 5 -# define SI_ADDR_SURF_P4_16x32 6 -# define SI_ADDR_SURF_P4_32x32 7 -# define SI_ADDR_SURF_P8_16x16_8x16 8 -# define SI_ADDR_SURF_P8_16x32_8x16 9 -# define SI_ADDR_SURF_P8_32x32_8x16 10 -# define SI_ADDR_SURF_P8_16x32_16x16 11 -# define SI_ADDR_SURF_P8_32x32_16x16 12 -# define SI_ADDR_SURF_P8_32x32_16x32 13 -# define SI_ADDR_SURF_P8_32x64_32x32 14 - -#endif diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index e4a1697ec1d3..1d26ae768147 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -622,6 +622,8 @@ typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device, typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); +typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en); + struct cgs_ops { /* memory management calls (similar to KFD interface) */ cgs_gpu_mem_info_t gpu_mem_info; @@ 
-674,6 +676,7 @@ struct cgs_ops { /* get system info */ cgs_query_system_info query_system_info; cgs_is_virtualization_enabled_t is_virtualization_enabled; + cgs_enter_safe_mode enter_safe_mode; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -779,4 +782,8 @@ struct cgs_device #define cgs_is_virtualization_enabled(cgs_device) \ CGS_CALL(is_virtualization_enabled, cgs_device) + +#define cgs_enter_safe_mode(cgs_device, en) \ + CGS_CALL(enter_safe_mode, cgs_device, en) + #endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index c81cf1412728..429f18b99323 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/gfp.h> @@ -29,153 +30,154 @@ #include "pp_instance.h" #include "power_state.h" #include "eventmanager.h" -#include "pp_debug.h" -#define PP_CHECK(handle) \ - do { \ - if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ - return -EINVAL; \ - } while (0) +static inline int pp_check(struct pp_instance *handle) +{ + if (handle == NULL || handle->pp_valid != PP_VALID) + return -EINVAL; -#define PP_CHECK_HW(hwmgr) \ - do { \ - if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \ - return 0; \ - } while (0) + if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL) + return -EINVAL; + + if (handle->pm_en == 0) + return PP_DPM_DISABLED; + + if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL + || handle->eventmgr == NULL) + return PP_DPM_DISABLED; + + return 0; +} static int pp_early_init(void *handle) { + int ret; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + + ret = smum_early_init(pp_handle); + if (ret) + return ret; + + if ((pp_handle->pm_en == 0) + || cgs_is_virtualization_enabled(pp_handle->device)) + return PP_DPM_DISABLED; + + ret = hwmgr_early_init(pp_handle); + if (ret) { + pp_handle->pm_en = 0; + return PP_DPM_DISABLED; + } + + ret = eventmgr_early_init(pp_handle); + if (ret) { + kfree(pp_handle->hwmgr); + pp_handle->hwmgr = NULL; + pp_handle->pm_en = 0; + return PP_DPM_DISABLED; + } + return 0; } static int pp_sw_init(void *handle) { - struct pp_instance *pp_handle; - struct pp_hwmgr *hwmgr; + struct pp_smumgr *smumgr; int ret = 0; + struct pp_instance *pp_handle = (struct pp_instance *)handle; - if (handle == NULL) - return -EINVAL; - - pp_handle = (struct pp_instance *)handle; - hwmgr = pp_handle->hwmgr; - - PP_CHECK_HW(hwmgr); - - if (hwmgr->pptable_func == NULL || - hwmgr->pptable_func->pptable_init == NULL || - hwmgr->hwmgr_func->backend_init == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - ret = hwmgr->pptable_func->pptable_init(hwmgr); - if (ret) - goto err; + if (ret == 0 || ret == PP_DPM_DISABLED) { + smumgr = pp_handle->smu_mgr; - ret = hwmgr->hwmgr_func->backend_init(hwmgr); - if (ret) - goto err1; + if (smumgr->smumgr_funcs->smu_init == NULL) + return -EINVAL; - pr_info("amdgpu: powerplay initialized\n"); + ret = smumgr->smumgr_funcs->smu_init(smumgr); - return 0; -err1: - if (hwmgr->pptable_func->pptable_fini) - hwmgr->pptable_func->pptable_fini(hwmgr); -err: - pr_err("amdgpu: powerplay initialization failed\n"); + pr_info("amdgpu: powerplay sw initialized\n"); + } return ret; } static int pp_sw_fini(void *handle) { - struct pp_instance *pp_handle; - struct pp_hwmgr *hwmgr; + struct pp_smumgr *smumgr; int ret = 
0; + struct pp_instance *pp_handle = (struct pp_instance *)handle; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); + if (ret == 0 || ret == PP_DPM_DISABLED) { + smumgr = pp_handle->smu_mgr; - pp_handle = (struct pp_instance *)handle; - hwmgr = pp_handle->hwmgr; - - PP_CHECK_HW(hwmgr); - - if (hwmgr->hwmgr_func->backend_fini != NULL) - ret = hwmgr->hwmgr_func->backend_fini(hwmgr); - - if (hwmgr->pptable_func->pptable_fini) - hwmgr->pptable_func->pptable_fini(hwmgr); + if (smumgr->smumgr_funcs->smu_fini == NULL) + return -EINVAL; + ret = smumgr->smumgr_funcs->smu_fini(smumgr); + } return ret; } static int pp_hw_init(void *handle) { - struct pp_instance *pp_handle; struct pp_smumgr *smumgr; struct pp_eventmgr *eventmgr; - struct pp_hwmgr *hwmgr; int ret = 0; + struct pp_instance *pp_handle = (struct pp_instance *)handle; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - pp_handle = (struct pp_instance *)handle; - smumgr = pp_handle->smu_mgr; - hwmgr = pp_handle->hwmgr; + if (ret == 0 || ret == PP_DPM_DISABLED) { + smumgr = pp_handle->smu_mgr; - if (smumgr == NULL || smumgr->smumgr_funcs == NULL || - smumgr->smumgr_funcs->smu_init == NULL || - smumgr->smumgr_funcs->start_smu == NULL) - return -EINVAL; - - ret = smumgr->smumgr_funcs->smu_init(smumgr); - if (ret) { - printk(KERN_ERR "[ powerplay ] smc initialization failed\n"); - return ret; - } + if (smumgr->smumgr_funcs->start_smu == NULL) + return -EINVAL; - ret = smumgr->smumgr_funcs->start_smu(smumgr); - if (ret) { - printk(KERN_ERR "[ powerplay ] smc start failed\n"); - smumgr->smumgr_funcs->smu_fini(smumgr); - return ret; + if (smumgr->smumgr_funcs->start_smu(smumgr)) { + pr_err("smc start failed\n"); + smumgr->smumgr_funcs->smu_fini(smumgr); + return -EINVAL; + } + if (ret == PP_DPM_DISABLED) + return PP_DPM_DISABLED; } - PP_CHECK_HW(hwmgr); - - hw_init_power_state_table(hwmgr); + ret = hwmgr_hw_init(pp_handle); + if (ret) + goto err; eventmgr = pp_handle->eventmgr; - if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) - return -EINVAL; + if (eventmgr->pp_eventmgr_init == NULL || + eventmgr->pp_eventmgr_init(eventmgr)) + goto err; - ret = eventmgr->pp_eventmgr_init(eventmgr); return 0; +err: + pp_handle->pm_en = 0; + kfree(pp_handle->eventmgr); + kfree(pp_handle->hwmgr); + pp_handle->hwmgr = NULL; + pp_handle->eventmgr = NULL; + return PP_DPM_DISABLED; } static int pp_hw_fini(void *handle) { - struct pp_instance *pp_handle; - struct pp_smumgr *smumgr; struct pp_eventmgr *eventmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; - - pp_handle = (struct pp_instance *)handle; - eventmgr = pp_handle->eventmgr; + ret = pp_check(pp_handle); - if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL) - eventmgr->pp_eventmgr_fini(eventmgr); + if (ret == 0) { + eventmgr = pp_handle->eventmgr; - smumgr = pp_handle->smu_mgr; - - if (smumgr != NULL && smumgr->smumgr_funcs != NULL && - smumgr->smumgr_funcs->smu_fini != NULL) - smumgr->smumgr_funcs->smu_fini(smumgr); + if (eventmgr->pp_eventmgr_fini != NULL) + eventmgr->pp_eventmgr_fini(eventmgr); + hwmgr_hw_fini(pp_handle); + } return 0; } @@ -198,16 +200,18 @@ static int pp_sw_reset(void *handle) int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + 
if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -218,16 +222,18 @@ static int pp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -238,49 +244,53 @@ static int pp_set_powergating_state(void *handle, static int pp_suspend(void *handle) { - struct pp_instance *pp_handle; struct pp_eventmgr *eventmgr; struct pem_event_data event_data = { {0} }; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); + + if (ret != 0) + return ret; - pp_handle = (struct pp_instance *)handle; eventmgr = pp_handle->eventmgr; + pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data); - if (eventmgr != NULL) - pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data); return 0; } static int pp_resume(void *handle) { - struct pp_instance *pp_handle; struct pp_eventmgr *eventmgr; struct pem_event_data event_data = { {0} }; struct pp_smumgr *smumgr; - int ret; + int ret, ret1; + struct pp_instance *pp_handle = (struct pp_instance *)handle; - if (handle == NULL) - return -EINVAL; + ret1 = pp_check(pp_handle); + + if (ret1 != 0 && ret1 != PP_DPM_DISABLED) + return ret1; - pp_handle = (struct pp_instance *)handle; smumgr = pp_handle->smu_mgr; - if (smumgr == NULL || smumgr->smumgr_funcs == NULL || - smumgr->smumgr_funcs->start_smu == NULL) + if (smumgr->smumgr_funcs->start_smu == NULL) return -EINVAL; ret = smumgr->smumgr_funcs->start_smu(smumgr); if (ret) { - printk(KERN_ERR "[ powerplay ] smc start failed\n"); + pr_err("smc start failed\n"); smumgr->smumgr_funcs->smu_fini(smumgr); return ret; } + if (ret1 == PP_DPM_DISABLED) + return ret1; + eventmgr = pp_handle->eventmgr; - if (eventmgr != NULL) - pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data); + + pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data); return 0; } @@ -315,20 +325,19 @@ static int pp_dpm_fw_loading_complete(void *handle) static int pp_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { - struct pp_instance *pp_handle; struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - pp_handle = (struct pp_instance *)handle; + if (ret != 0) + return ret; hwmgr = pp_handle->hwmgr; - PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->force_dpm_level == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -341,30 +350,34 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( void *handle) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret 
!= 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; - return (((struct pp_instance *)handle)->hwmgr->dpm_level); + return hwmgr->dpm_level; } static int pp_dpm_get_sclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_sclk == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -374,16 +387,18 @@ static int pp_dpm_get_sclk(void *handle, bool low) static int pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_mclk == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -393,16 +408,18 @@ static int pp_dpm_get_mclk(void *handle, bool low) static int pp_dpm_powergate_vce(void *handle, bool gate) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_vce == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -412,16 +429,18 @@ static int pp_dpm_powergate_vce(void *handle, bool gate) static int pp_dpm_powergate_uvd(void *handle, bool gate) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -446,16 +465,13 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output) { int ret = 0; - struct pp_instance *pp_handle; struct pem_event_data data = { {0} }; + struct pp_instance *pp_handle = (struct pp_instance *)handle; - pp_handle = (struct pp_instance *)handle; + ret = pp_check(pp_handle); - if (pp_handle == NULL) - return -EINVAL; - - if (pp_handle->eventmgr == NULL) - return 0; + if (ret != 0) + return ret; switch (event_id) { case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE: @@ -489,13 +505,17 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) { struct pp_hwmgr *hwmgr; struct pp_power_state *state; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - if (hwmgr == NULL || hwmgr->current_ps == NULL) + hwmgr = pp_handle->hwmgr; + + if (hwmgr->current_ps == NULL) return -EINVAL; state = hwmgr->current_ps; @@ 
-518,16 +538,18 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -537,16 +559,18 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) static int pp_dpm_get_fan_control_mode(void *handle) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -556,16 +580,18 @@ static int pp_dpm_get_fan_control_mode(void *handle) static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -575,16 +601,18 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -594,13 +622,15 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) return -EINVAL; @@ -611,16 +641,18 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) static int pp_dpm_get_temperature(void *handle) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_temperature == NULL) { - 
printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -632,13 +664,17 @@ static int pp_dpm_get_pp_num_states(void *handle, { struct pp_hwmgr *hwmgr; int i; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; + + hwmgr = pp_handle->hwmgr; - if (hwmgr == NULL || hwmgr->ps == NULL) + if (hwmgr->ps == NULL) return -EINVAL; data->nums = hwmgr->num_ps; @@ -670,13 +706,15 @@ static int pp_dpm_get_pp_num_states(void *handle, static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (!hwmgr->soft_pp_table) return -EINVAL; @@ -689,13 +727,15 @@ static int pp_dpm_get_pp_table(void *handle, char **table) static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (!hwmgr->hardcode_pp_table) { hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, @@ -717,16 +757,18 @@ static int pp_dpm_force_clock_level(void *handle, enum pp_clock_type type, uint32_t mask) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->force_clock_level == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -737,16 +779,18 @@ static int pp_dpm_print_clock_levels(void *handle, enum pp_clock_type type, char *buf) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->print_clock_levels == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); @@ -755,16 +799,18 @@ static int pp_dpm_print_clock_levels(void *handle, static int pp_dpm_get_sclk_od(void *handle) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_sclk_od == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -774,16 +820,18 @@ static int pp_dpm_get_sclk_od(void *handle) static int pp_dpm_set_sclk_od(void *handle, uint32_t value) { struct pp_hwmgr *hwmgr; + 
struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_sclk_od == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -793,16 +841,18 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) static int pp_dpm_get_mclk_od(void *handle) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->get_mclk_od == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -812,16 +862,18 @@ static int pp_dpm_get_mclk_od(void *handle) static int pp_dpm_set_mclk_od(void *handle, uint32_t value) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->set_mclk_od == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -831,16 +883,18 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (!handle) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; if (hwmgr->hwmgr_func->read_sensor == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); + pr_info("%s was not implemented.\n", __func__); return 0; } @@ -851,13 +905,18 @@ static struct amd_vce_state* pp_dpm_get_vce_clock_state(void *handle, unsigned idx) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - if (handle) { - hwmgr = ((struct pp_instance *)handle)->hwmgr; + ret = pp_check(pp_handle); - if (hwmgr && idx < hwmgr->num_vce_state_tables) - return &hwmgr->vce_states[idx]; - } + if (ret != 0) + return NULL; + + hwmgr = pp_handle->hwmgr; + + if (hwmgr && idx < hwmgr->num_vce_state_tables) + return &hwmgr->vce_states[idx]; return NULL; } @@ -892,89 +951,44 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .get_vce_clock_state = pp_dpm_get_vce_clock_state, }; -static int amd_pp_instance_init(struct amd_pp_init *pp_init, - struct amd_powerplay *amd_pp) +int amd_powerplay_create(struct amd_pp_init *pp_init, + void **handle) { - int ret; - struct pp_instance *handle; - - handle = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); - if (handle == NULL) - return -ENOMEM; - - handle->pp_valid = PP_VALID; - - ret = smum_init(pp_init, handle); - if (ret) - goto fail_smum; - - - amd_pp->pp_handle = handle; + struct pp_instance *instance; - if ((amdgpu_dpm == 0) - || cgs_is_virtualization_enabled(pp_init->device)) - return 0; + if (pp_init == NULL || handle == NULL) + return -EINVAL; - ret = 
hwmgr_init(pp_init, handle); - if (ret) - goto fail_hwmgr; + instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); + if (instance == NULL) + return -ENOMEM; - ret = eventmgr_init(handle); - if (ret) - goto fail_eventmgr; + instance->pp_valid = PP_VALID; + instance->chip_family = pp_init->chip_family; + instance->chip_id = pp_init->chip_id; + instance->pm_en = pp_init->pm_en; + instance->feature_mask = pp_init->feature_mask; + instance->device = pp_init->device; + *handle = instance; return 0; - -fail_eventmgr: - hwmgr_fini(handle->hwmgr); -fail_hwmgr: - smum_fini(handle->smu_mgr); -fail_smum: - kfree(handle); - return ret; } -static int amd_pp_instance_fini(void *handle) +int amd_powerplay_destroy(void *handle) { struct pp_instance *instance = (struct pp_instance *)handle; - if (instance == NULL) - return -EINVAL; - - if ((amdgpu_dpm != 0) - && !cgs_is_virtualization_enabled(instance->smu_mgr->device)) { - eventmgr_fini(instance->eventmgr); - hwmgr_fini(instance->hwmgr); + if (instance->pm_en) { + kfree(instance->eventmgr); + kfree(instance->hwmgr); + instance->hwmgr = NULL; + instance->eventmgr = NULL; } - smum_fini(instance->smu_mgr); - kfree(handle); - return 0; -} - -int amd_powerplay_init(struct amd_pp_init *pp_init, - struct amd_powerplay *amd_pp) -{ - int ret; - - if (pp_init == NULL || amd_pp == NULL) - return -EINVAL; - - ret = amd_pp_instance_init(pp_init, amd_pp); - - if (ret) - return ret; - - amd_pp->ip_funcs = &pp_ip_funcs; - amd_pp->pp_funcs = &pp_dpm_funcs; - - return 0; -} - -int amd_powerplay_fini(void *handle) -{ - amd_pp_instance_fini(handle); - + kfree(instance->smu_mgr); + instance->smu_mgr = NULL; + kfree(instance); + instance = NULL; return 0; } @@ -985,33 +999,25 @@ int amd_powerplay_reset(void *handle) struct pem_event_data event_data = { {0} }; int ret; - if (instance == NULL) - return -EINVAL; - - eventmgr = instance->eventmgr; - if (!eventmgr || !eventmgr->pp_eventmgr_fini) - return -EINVAL; - - eventmgr->pp_eventmgr_fini(eventmgr); + if (cgs_is_virtualization_enabled(instance->smu_mgr->device)) + return PP_DPM_DISABLED; - ret = pp_sw_fini(handle); - if (ret) + ret = pp_check(instance); + if (ret != 0) return ret; - kfree(instance->hwmgr->ps); - - ret = pp_sw_init(handle); + ret = pp_hw_fini(handle); if (ret) return ret; - if ((amdgpu_dpm == 0) - || cgs_is_virtualization_enabled(instance->smu_mgr->device)) - return 0; + ret = hwmgr_hw_init(instance); + if (ret) + return PP_DPM_DISABLED; - hw_init_power_state_table(instance->hwmgr); + eventmgr = instance->eventmgr; - if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) - return -EINVAL; + if (eventmgr->pp_eventmgr_init == NULL) + return PP_DPM_DISABLED; ret = eventmgr->pp_eventmgr_init(eventmgr); if (ret) @@ -1026,12 +1032,15 @@ int amd_powerplay_display_configuration_change(void *handle, const struct amd_pp_display_configuration *display_config) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - PP_CHECK((struct pp_instance *)handle); + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; phm_store_dal_configuration_data(hwmgr, display_config); @@ -1042,15 +1051,18 @@ int amd_powerplay_get_display_power_level(void *handle, struct amd_pp_simple_clock_info *output) { struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - PP_CHECK((struct pp_instance *)handle); + ret = pp_check(pp_handle); - if 
(output == NULL) - return -EINVAL; + if (ret != 0) + return ret; - hwmgr = ((struct pp_instance *)handle)->hwmgr; + hwmgr = pp_handle->hwmgr; - PP_CHECK_HW(hwmgr); + if (output == NULL) + return -EINVAL; return phm_get_dal_power_level(hwmgr, output); } @@ -1058,18 +1070,18 @@ int amd_powerplay_get_display_power_level(void *handle, int amd_powerplay_get_current_clocks(void *handle, struct amd_pp_clock_info *clocks) { - struct pp_hwmgr *hwmgr; struct amd_pp_simple_clock_info simple_clocks; struct pp_clock_info hw_clocks; + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - PP_CHECK((struct pp_instance *)handle); - - if (clocks == NULL) - return -EINVAL; + ret = pp_check(pp_handle); - hwmgr = ((struct pp_instance *)handle)->hwmgr; + if (ret != 0) + return ret; - PP_CHECK_HW(hwmgr); + hwmgr = pp_handle->hwmgr; phm_get_dal_power_level(hwmgr, &simple_clocks); @@ -1105,18 +1117,20 @@ int amd_powerplay_get_current_clocks, int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) { int result = -1; + struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - struct pp_hwmgr *hwmgr; + ret = pp_check(pp_handle); - PP_CHECK((struct pp_instance *)handle); + if (ret != 0) + return ret; + + hwmgr = pp_handle->hwmgr; if (clocks == NULL) return -EINVAL; - hwmgr = ((struct pp_instance *)handle)->hwmgr; - - PP_CHECK_HW(hwmgr); - result = phm_get_clock_by_type(hwmgr, type, clocks); return result; @@ -1125,21 +1139,24 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s int amd_powerplay_get_display_mode_validation_clocks(void *handle, struct amd_pp_simple_clock_info *clocks) { - int result = -1; struct pp_hwmgr *hwmgr; + struct pp_instance *pp_handle = (struct pp_instance *)handle; + int ret = 0; - PP_CHECK((struct pp_instance *)handle); + ret = pp_check(pp_handle); - if (clocks == NULL) - return -EINVAL; + if (ret != 0) + return ret; + + hwmgr = pp_handle->hwmgr; - hwmgr = ((struct pp_instance *)handle)->hwmgr; - PP_CHECK_HW(hwmgr); + if (clocks == NULL) + return -EINVAL; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) - result = phm_get_max_high_clocks(hwmgr, clocks); + ret = phm_get_max_high_clocks(hwmgr, clocks); - return result; + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c index d5ec8ccbe97d..a3cd230d636d 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c @@ -151,7 +151,7 @@ static int thermal_interrupt_callback(void *private_data, unsigned src_id, const uint32_t *iv_entry) { /* TO DO handle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/ - printk("current thermal is out of range \n"); + pr_info("current thermal is out of range \n"); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index fb88e4e5d625..781e53dcf128 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c @@ -60,9 +60,8 @@ static void pem_fini(struct pp_eventmgr *eventmgr) pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); } -int eventmgr_init(struct pp_instance *handle) +int eventmgr_early_init(struct pp_instance *handle) { - int result = 0; struct pp_eventmgr 
*eventmgr; if (handle == NULL) @@ -79,12 +78,6 @@ int eventmgr_init(struct pp_instance *handle) eventmgr->pp_eventmgr_init = pem_init; eventmgr->pp_eventmgr_fini = pem_fini; - return result; -} - -int eventmgr_fini(struct pp_eventmgr *eventmgr) -{ - kfree(eventmgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c index ec36c0e28388..e04216ec7ee1 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c @@ -38,10 +38,13 @@ int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) { + if (eventmgr == NULL || eventmgr->hwmgr == NULL) + return -EINVAL; + if (pem_is_hw_access_blocked(eventmgr)) return 0; - phm_force_dpm_levels(eventmgr->hwmgr, AMD_DPM_FORCED_LEVEL_AUTO); + phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index b0c63c5f54c9..3eccac735db3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_CG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; @@ -240,10 +240,16 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = { /*we don't need an exit table here, because there is only D3 cold on Kv*/ - { phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize }, - { phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize }, + { + .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating, + .tableFunction = cz_tf_uvd_power_gating_initialize + }, + { + .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating, + .tableFunction = cz_tf_vce_power_gating_initialize + }, /* to do { NULL, cz_tf_xdma_power_gating_enable }, */ - { NULL, NULL } + { } }; const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 4b14f259a147..a4cde3d778b8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -20,13 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include "atom-types.h" #include "atombios.h" #include "processpptables.h" -#include "pp_debug.h" #include "cgs_common.h" #include "smu/smu_8_0_d.h" #include "smu8_fusion.h" @@ -38,7 +38,6 @@ #include "cz_hwmgr.h" #include "power_state.h" #include "cz_clockpowergating.h" -#include "pp_debug.h" #define ixSMUSVI_NB_CURRENTVID 0xD8230044 #define CURRENT_NB_VID_MASK 0xff000000 @@ -288,7 +287,7 @@ static int cz_init_dynamic_state_adjustment_rule_settings( kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { - printk(KERN_ERR "[ powerplay ] Can not allocate memory!\n"); + pr_err("Can not allocate memory!\n"); return -ENOMEM; } @@ -329,12 +328,12 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) &size, &frev, &crev); if (crev != 9) { - printk(KERN_ERR "[ powerplay ] Unsupported IGP table: %d %d\n", frev, crev); + pr_err("Unsupported IGP table: %d %d\n", frev, crev); return -EINVAL; } if (info == NULL) { - printk(KERN_ERR "[ powerplay ] Could not retrieve the Integrated System Info Table!\n"); + pr_err("Could not retrieve the Integrated System Info Table!\n"); return -EINVAL; } @@ -361,7 +360,7 @@ static int cz_get_system_info_data(struct pp_hwmgr *hwmgr) if (cz_hwmgr->sys_info.htc_tmp_lmt <= cz_hwmgr->sys_info.htc_hyst_lmt) { - printk(KERN_ERR "[ powerplay ] The htcTmpLmt should be larger than htcHystLmt.\n"); + pr_err("The htcTmpLmt should be larger than htcHystLmt.\n"); return -EINVAL; } @@ -723,7 +722,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, clock = hwmgr->display_config.min_core_set_clock; if (clock == 0) - printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n"); + pr_info("min_core_set_clock not set\n"); if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) { cz_hwmgr->sclk_dpm.hard_min_clk = clock; @@ -888,13 +887,13 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_set_power_state_list[] = { - {NULL, cz_tf_update_sclk_limit}, - {NULL, cz_tf_set_deep_sleep_sclk_threshold}, - {NULL, cz_tf_set_watermark_threshold}, - {NULL, cz_tf_set_enabled_levels}, - {NULL, cz_tf_enable_nb_dpm}, - {NULL, cz_tf_update_low_mem_pstate}, - {NULL, NULL} + { .tableFunction = cz_tf_update_sclk_limit }, + { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold }, + { .tableFunction = cz_tf_set_watermark_threshold }, + { .tableFunction = cz_tf_set_enabled_levels }, + { .tableFunction = cz_tf_enable_nb_dpm }, + { .tableFunction = cz_tf_update_low_mem_pstate }, + { } }; static const struct phm_master_table_header cz_set_power_state_master = { @@ -904,15 +903,15 @@ static const struct phm_master_table_header cz_set_power_state_master = { }; static const struct phm_master_table_item cz_setup_asic_list[] = { - {NULL, cz_tf_reset_active_process_mask}, - {NULL, cz_tf_upload_pptable_to_smu}, - {NULL, cz_tf_init_sclk_limit}, - {NULL, cz_tf_init_uvd_limit}, - {NULL, cz_tf_init_vce_limit}, - {NULL, cz_tf_init_acp_limit}, - {NULL, cz_tf_init_power_gate_state}, - {NULL, cz_tf_init_sclk_threshold}, - {NULL, NULL} + { .tableFunction = cz_tf_reset_active_process_mask }, + { .tableFunction = cz_tf_upload_pptable_to_smu }, + { .tableFunction = cz_tf_init_sclk_limit }, + { .tableFunction = cz_tf_init_uvd_limit }, + { .tableFunction = cz_tf_init_vce_limit }, + { .tableFunction = cz_tf_init_acp_limit }, + { .tableFunction = cz_tf_init_power_gate_state }, + { .tableFunction = cz_tf_init_sclk_threshold }, + { } }; static const struct 
phm_master_table_header cz_setup_asic_master = { @@ -957,10 +956,10 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_power_down_asic_list[] = { - {NULL, cz_tf_power_up_display_clock_sys_pll}, - {NULL, cz_tf_clear_nb_dpm_flag}, - {NULL, cz_tf_reset_cc6_data}, - {NULL, NULL} + { .tableFunction = cz_tf_power_up_display_clock_sys_pll }, + { .tableFunction = cz_tf_clear_nb_dpm_flag }, + { .tableFunction = cz_tf_reset_cc6_data }, + { } }; static const struct phm_master_table_header cz_power_down_asic_master = { @@ -1068,8 +1067,8 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr, } static const struct phm_master_table_item cz_disable_dpm_list[] = { - { NULL, cz_tf_check_for_dpm_enabled}, - {NULL, NULL}, + { .tableFunction = cz_tf_check_for_dpm_enabled }, + { }, }; @@ -1080,13 +1079,13 @@ static const struct phm_master_table_header cz_disable_dpm_master = { }; static const struct phm_master_table_item cz_enable_dpm_list[] = { - { NULL, cz_tf_check_for_dpm_disabled }, - { NULL, cz_tf_program_voting_clients }, - { NULL, cz_tf_start_dpm}, - { NULL, cz_tf_program_bootup_state}, - { NULL, cz_tf_enable_didt }, - { NULL, cz_tf_reset_acp_boot_level }, - {NULL, NULL}, + { .tableFunction = cz_tf_check_for_dpm_disabled }, + { .tableFunction = cz_tf_program_voting_clients }, + { .tableFunction = cz_tf_start_dpm }, + { .tableFunction = cz_tf_program_bootup_state }, + { .tableFunction = cz_tf_enable_didt }, + { .tableFunction = cz_tf_reset_acp_boot_level }, + { }, }; static const struct phm_master_table_header cz_enable_dpm_master = { @@ -1162,13 +1161,13 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { - printk(KERN_ERR "[ powerplay ] cz_initialize_dpm_defaults failed\n"); + pr_err("cz_initialize_dpm_defaults failed\n"); return result; } result = cz_get_system_info_data(hwmgr); if (result != 0) { - printk(KERN_ERR "[ powerplay ] cz_get_system_info_data failed\n"); + pr_err("cz_get_system_info_data failed\n"); return result; } @@ -1177,40 +1176,40 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) result = phm_construct_table(hwmgr, &cz_setup_asic_master, &(hwmgr->setup_asic)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to construct setup ASIC\n"); + pr_err("Fail to construct setup ASIC\n"); return result; } result = phm_construct_table(hwmgr, &cz_power_down_asic_master, &(hwmgr->power_down_asic)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to construct power down ASIC\n"); + pr_err("Fail to construct power down ASIC\n"); return result; } result = phm_construct_table(hwmgr, &cz_disable_dpm_master, &(hwmgr->disable_dynamic_state_management)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to disable_dynamic_state\n"); + pr_err("Fail to disable_dynamic_state\n"); return result; } result = phm_construct_table(hwmgr, &cz_enable_dpm_master, &(hwmgr->enable_dynamic_state_management)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to enable_dynamic_state\n"); + pr_err("Fail to enable_dynamic_state\n"); return result; } result = phm_construct_table(hwmgr, &cz_set_power_state_master, &(hwmgr->set_power_state)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to construct set_power_state\n"); + pr_err("Fail to construct set_power_state\n"); return result; } hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS; result = phm_construct_table(hwmgr, 
&cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings)); if (result != 0) { - printk(KERN_ERR "[ powerplay ] Fail to construct enable_clock_power_gatings\n"); + pr_err("Fail to construct enable_clock_power_gatings\n"); return result; } return result; @@ -1218,9 +1217,15 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr != NULL && hwmgr->backend != NULL) { + if (hwmgr != NULL) { + phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings)); + phm_destroy_table(hwmgr, &(hwmgr->set_power_state)); + phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management)); + phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); + phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); + phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); kfree(hwmgr->backend); - kfree(hwmgr); + hwmgr->backend = NULL; } return 0; } @@ -1402,14 +1407,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk, PPSMC_MSG_SetEclkHardMin)); } else { - /*EPR# 419220 -HW limitation to to */ - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(hwmgr, - cz_hwmgr->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - + /*Program HardMin based on the vce_arbiter.ecclk */ + if (hwmgr->vce_arbiter.ecclk == 0) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, 0); + /* disable ECLK DPM 0. Otherwise VCE could hang if + * switching SCLK from DPM 0 to 6/7 */ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkSoftMin, 1); + } else { + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } } return 0; } @@ -1931,7 +1944,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { .read_sensor = cz_read_sensor, }; -int cz_hwmgr_init(struct pp_hwmgr *hwmgr) +int cz_init_function_pointers(struct pp_hwmgr *hwmgr) { hwmgr->hwmgr_func = &cz_hwmgr_funcs; hwmgr->pptable_func = &pptable_funcs; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h index c477f1cf3f23..508b422d6159 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h @@ -316,7 +316,6 @@ struct cz_hwmgr { struct pp_hwmgr; -int cz_hwmgr_init(struct pp_hwmgr *hwmgr); int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr); int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr); int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c index 71822ae73a12..bc7d8bd7e7cb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c @@ -35,7 +35,7 @@ static int phm_run_table(struct pp_hwmgr *hwmgr, phm_table_function *function; if (rt_table->function_list == NULL) { - pr_debug("[ powerplay ] this function not implement!\n"); + pr_debug("this function not implement!\n"); return 0; } @@ -63,14 +63,14 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, void *temp_storage; if (hwmgr == NULL || rt_table == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); + pr_err("Invalid Parameter!\n"); return -EINVAL; } if (0 != 
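The reworked cz_hwmgr_backend_fini() above destroys the runtime tables, frees only the backend, and NULLs the pointer instead of freeing hwmgr itself. A sketch of the free-and-NULL idiom, with a reduced struct standing in for pp_hwmgr:

```c
#include <stdlib.h>

struct mgr {
        void *backend;
};

/* free(NULL) is a no-op (like kfree(NULL)), so freeing and then
 * NULLing the pointer makes teardown safe to run twice and leaves
 * the owning object intact for its own destructor. */
static void backend_fini(struct mgr *m)
{
        if (m != NULL) {
                free(m->backend);
                m->backend = NULL;
        }
}

int main(void)
{
        struct mgr m = { .backend = malloc(16) };

        backend_fini(&m);
        backend_fini(&m);       /* second call is harmless */
        return 0;
}
```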
rt_table->storage_size) { temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL); if (temp_storage == NULL) { - printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n"); + pr_err("Could not allocate table temporary storage\n"); return -ENOMEM; } } else { @@ -95,7 +95,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr, phm_table_function *rtf; if (hwmgr == NULL || master_table == NULL || rt_table == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); + pr_err("Invalid Parameter!\n"); return -EINVAL; } @@ -116,7 +116,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr, for (table_item = master_table->master_list; NULL != table_item->tableFunction; table_item++) { if ((rtf - run_time_list) > function_count) { - printk(KERN_ERR "[ powerplay ] Check function results have changed\n"); + pr_err("Check function results have changed\n"); kfree(run_time_list); return -EINVAL; } @@ -128,7 +128,7 @@ int phm_construct_table(struct pp_hwmgr *hwmgr, } if ((rtf - run_time_list) > function_count) { - printk(KERN_ERR "[ powerplay ] Check function results have changed\n"); + pr_err("Check function results have changed\n"); kfree(run_time_list); return -EINVAL; } @@ -144,7 +144,7 @@ int phm_destroy_table(struct pp_hwmgr *hwmgr, struct phm_runtime_table_header *rt_table) { if (hwmgr == NULL || rt_table == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Parameter\n"); + pr_err("Invalid Parameter\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index c355a0f51663..0eb8e886bf35 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -20,11 +20,11 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#include "pp_debug.h" #include <linux/errno.h> #include "hwmgr.h" #include "hardwaremanager.h" #include "power_state.h" -#include "pp_debug.h" #define PHM_FUNC_CHECK(hw) \ do { \ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index dc6700aee18f..2ea9c0e78689 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -20,6 +20,8 @@ * OTHER DEALINGS IN THE SOFTWARE. 
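phm_construct_table(), shown above with its messages converted to pr_err(), builds a flat runtime list from a sentinel-terminated master list. A minimal sketch of that shape under simplified names (the real function also allocates storage and validates the count twice):

```c
#include <stdlib.h>

typedef int (*table_fn)(void);

struct master_item {
        table_fn tableFunction;
};

/* Count the entries of a sentinel-terminated master list, then copy
 * the function pointers into a flat, NULL-terminated runtime array. */
static table_fn *build_run_list(const struct master_item *master)
{
        size_t n = 0, i;
        table_fn *run_list;

        for (i = 0; master[i].tableFunction != NULL; i++)
                n++;

        run_list = calloc(n + 1, sizeof(*run_list));
        if (run_list == NULL)
                return NULL;

        for (i = 0; i < n; i++)
                run_list[i] = master[i].tableFunction;

        return run_list;        /* caller frees, cf. phm_destroy_table() */
}
```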
* */ + +#include "pp_debug.h" #include "linux/delay.h" #include <linux/types.h> #include <linux/kernel.h> @@ -29,13 +31,12 @@ #include "power_state.h" #include "hwmgr.h" #include "pppcielanes.h" -#include "pp_debug.h" #include "ppatomctrl.h" #include "ppsmc.h" #include "pp_acpi.h" #include "amd_acpi.h" -extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr); static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr); static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr); @@ -49,11 +50,11 @@ uint8_t convert_to_vid(uint16_t vddc) return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); } -int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) +int hwmgr_early_init(struct pp_instance *handle) { struct pp_hwmgr *hwmgr; - if ((handle == NULL) || (pp_init == NULL)) + if (handle == NULL) return -EINVAL; hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); @@ -62,19 +63,20 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) handle->hwmgr = hwmgr; hwmgr->smumgr = handle->smu_mgr; - hwmgr->device = pp_init->device; - hwmgr->chip_family = pp_init->chip_family; - hwmgr->chip_id = pp_init->chip_id; + hwmgr->device = handle->device; + hwmgr->chip_family = handle->chip_family; + hwmgr->chip_id = handle->chip_id; + hwmgr->feature_mask = handle->feature_mask; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; hwmgr->pp_table_version = PP_TABLE_V1; - + hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; hwmgr_init_default_caps(hwmgr); hwmgr_set_user_specify_caps(hwmgr); switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: - cz_hwmgr_init(hwmgr); + cz_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { @@ -95,13 +97,14 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS12: polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; default: return -EINVAL; } - smu7_hwmgr_init(hwmgr); + smu7_init_function_pointers(hwmgr); break; default: return -EINVAL; @@ -110,28 +113,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) return 0; } -int hwmgr_fini(struct pp_hwmgr *hwmgr) -{ - if (hwmgr == NULL || hwmgr->ps == NULL) - return -EINVAL; - - /* do hwmgr finish*/ - kfree(hwmgr->hardcode_pp_table); - - kfree(hwmgr->backend); - - kfree(hwmgr->start_thermal_controller.function_list); - - kfree(hwmgr->set_temperature_range.function_list); - - kfree(hwmgr->ps); - kfree(hwmgr->current_ps); - kfree(hwmgr->request_ps); - kfree(hwmgr); - return 0; -} - -int hw_init_power_state_table(struct pp_hwmgr *hwmgr) +static int hw_init_power_state_table(struct pp_hwmgr *hwmgr) { int result; unsigned int i; @@ -155,12 +137,20 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr) return -ENOMEM; hwmgr->request_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->request_ps == NULL) + if (hwmgr->request_ps == NULL) { + kfree(hwmgr->ps); + hwmgr->ps = NULL; return -ENOMEM; + } hwmgr->current_ps = kzalloc(size, GFP_KERNEL); - if (hwmgr->current_ps == NULL) + if (hwmgr->current_ps == NULL) { + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; return -ENOMEM; + } state = hwmgr->ps; @@ -180,10 +170,77 @@ int hw_init_power_state_table(struct pp_hwmgr *hwmgr) state = (struct pp_power_state *)((unsigned long)state + size); } + return 0; +} +static int hw_fini_power_state_table(struct 
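The hw_init_power_state_table() hunk above adds rollback: when a later allocation fails, the earlier ones are freed and their pointers reset, so the caller never sees a half-built state. The same contract written with the goto-unwind idiom common in this file (the hunk itself uses explicit frees):

```c
#include <stdlib.h>

struct ps_set {
        void *ps, *request_ps, *current_ps;
};

static int init_power_state_tables(struct ps_set *t, size_t size)
{
        t->ps = calloc(1, size);
        if (t->ps == NULL)
                return -1;

        t->request_ps = calloc(1, size);
        if (t->request_ps == NULL)
                goto err_ps;

        t->current_ps = calloc(1, size);
        if (t->current_ps == NULL)
                goto err_request;

        return 0;

err_request:                            /* undo in reverse order */
        free(t->request_ps);
        t->request_ps = NULL;
err_ps:
        free(t->ps);
        t->ps = NULL;
        return -1;
}
```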
pp_hwmgr *hwmgr) +{ + if (hwmgr == NULL) + return -EINVAL; + + kfree(hwmgr->current_ps); + kfree(hwmgr->request_ps); + kfree(hwmgr->ps); + hwmgr->request_ps = NULL; + hwmgr->ps = NULL; + hwmgr->current_ps = NULL; return 0; } +int hwmgr_hw_init(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + int ret = 0; + + if (handle == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + + if (hwmgr->pptable_func == NULL || + hwmgr->pptable_func->pptable_init == NULL || + hwmgr->hwmgr_func->backend_init == NULL) + return -EINVAL; + + ret = hwmgr->pptable_func->pptable_init(hwmgr); + if (ret) + goto err; + + ret = hwmgr->hwmgr_func->backend_init(hwmgr); + if (ret) + goto err1; + + ret = hw_init_power_state_table(hwmgr); + if (ret) + goto err2; + return 0; +err2: + if (hwmgr->hwmgr_func->backend_fini) + hwmgr->hwmgr_func->backend_fini(hwmgr); +err1: + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); +err: + pr_err("amdgpu: powerplay initialization failed\n"); + return ret; +} + +int hwmgr_hw_fini(struct pp_instance *handle) +{ + struct pp_hwmgr *hwmgr; + + if (handle == NULL) + return -EINVAL; + + hwmgr = handle->hwmgr; + + if (hwmgr->hwmgr_func->backend_fini) + hwmgr->hwmgr_func->backend_fini(hwmgr); + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); + return hw_fini_power_state_table(hwmgr); +} + /** * Returns once the part of the register indicated by the mask has @@ -196,7 +253,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t cur_value; if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + pr_err("Invalid Hardware Manager!"); return -EINVAL; } @@ -226,7 +283,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t mask) { if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); + pr_err("Invalid Hardware Manager!"); return; } @@ -287,7 +344,7 @@ int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table) memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); kfree(table); - + table = NULL; return 0; } @@ -548,7 +605,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { - printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); + pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! 
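The new hwmgr_hw_init() above layers three stages and unwinds only the ones that already succeeded, in reverse order. A compilable sketch of that control flow, with the stage bodies reduced to stubs:

```c
#include <stdio.h>

static int  pptable_init(void)      { return 0; }
static void pptable_fini(void)      { }
static int  backend_init(void)      { return 0; }
static void backend_fini(void)      { }
static int  power_state_init(void)  { return 0; }

/* Mirror of hwmgr_hw_init(): a failure at stage N runs the fini
 * callbacks of stages N-1..1 and reports once at the bottom. */
static int hw_init(void)
{
        int ret;

        ret = pptable_init();
        if (ret)
                goto err;

        ret = backend_init();
        if (ret)
                goto err_pptable;

        ret = power_state_init();
        if (ret)
                goto err_backend;

        return 0;

err_backend:
        backend_fini();
err_pptable:
        pptable_fini();
err:
        fprintf(stderr, "powerplay initialization failed\n");
        return ret;
}

int main(void)
{
        return hw_init();
}
```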
\n"); return -ENOMEM; } else { table_clk_vlt->count = 4; @@ -568,21 +625,6 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr return 0; } -int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) -{ - if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; - } - - if (NULL != hwmgr->backend) { - kfree(hwmgr->backend); - hwmgr->backend = NULL; - } - - return 0; -} - uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) { uint32_t level = 0; @@ -624,7 +666,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) return; } } - printk(KERN_ERR "DAL requested level can not" + pr_err("DAL requested level can not" " found a available voltage in VDDC DPM Table \n"); } @@ -682,14 +724,14 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr) int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr) { - if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK) + if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); else phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); - if (amdgpu_pp_feature_mask & PP_POWER_CONTAINMENT_MASK) { + if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment); phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -700,7 +742,6 @@ int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); } - hwmgr->feature_mask = amdgpu_pp_feature_mask; return 0; } @@ -726,17 +767,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) { - /* power tune caps Assume disabled */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_DBRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TDRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_TCPRamping); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot); phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -745,9 +779,19 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface); - if (hwmgr->chip_id == CHIP_POLARIS11) + + if (hwmgr->chip_id != CHIP_POLARIS10) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); + + if (hwmgr->chip_id != CHIP_POLARIS11) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 0894527d932f..4b0a94cc995e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -20,13 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#include "pp_debug.h" #include <linux/module.h> #include <linux/slab.h> #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" -#include "pp_debug.h" #include "ppevvmath.h" #define MEM_ID_MASK 0xff000000 @@ -145,10 +145,10 @@ int atomctrl_initialize_mc_reg_table( GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); if (module_index >= vram_info->ucNumOfVRAMModule) { - printk(KERN_ERR "[ powerplay ] Invalid VramInfo table."); + pr_err("Invalid VramInfo table."); result = -1; } else if (vram_info->sHeader.ucTableFormatRevision < 2) { - printk(KERN_ERR "[ powerplay ] Invalid VramInfo table."); + pr_err("Invalid VramInfo table."); result = -1; } @@ -688,7 +688,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000); break; default: - printk(KERN_ERR "DPM Level not supported\n"); + pr_err("DPM Level not supported\n"); fPowerDPMx = Convert_ULONG_ToFraction(1); fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index c45bd2560468..84f01fd33aff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -20,13 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#include "pp_debug.h" #include <linux/module.h> #include <linux/slab.h> #include "process_pptables_v1_0.h" #include "ppatomctrl.h" #include "atombios.h" -#include "pp_debug.h" #include "hwmgr.h" #include "cgs_common.h" #include "pptable_v1_0.h" @@ -535,7 +535,7 @@ static int get_pcie_table( if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count) pcie_count = (uint32_t)atom_pcie_table->ucNumEntries; else - printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \ + pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \ Disregarding the excess entries... \n"); pcie_table->count = pcie_count; @@ -577,7 +577,7 @@ static int get_pcie_table( if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count) pcie_count = (uint32_t)atom_pcie_table->ucNumEntries; else - printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \ + pr_err("Number of Pcie Entries exceed the number of SCLK Dpm Levels! \ Disregarding the excess entries... \n"); pcie_table->count = pcie_count; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index a4e9cf429e62..ed6c934927fb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -27,7 +28,6 @@ #include "processpptables.h" #include <atom-types.h> #include <atombios.h> -#include "pp_debug.h" #include "pptable.h" #include "power_state.h" #include "hwmgr.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h index d52a28c343e3..c96ed9ed7eaf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h @@ -22,7 +22,7 @@ */ #ifndef _SMU7_CLOCK_POWER_GATING_H_ -#define _SMU7_CLOCK__POWER_GATING_H_ +#define _SMU7_CLOCK_POWER_GATING_H_ #include "smu7_hwmgr.h" #include "pp_asicblocks.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index a74f60a575ae..0a6c833720df 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -20,13 +20,13 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#include "pp_debug.h" #include <linux/module.h> #include <linux/slab.h> #include <linux/fb.h> #include <asm/div64.h> #include "linux/delay.h" #include "pp_acpi.h" -#include "pp_debug.h" #include "ppatomctrl.h" #include "atombios.h" #include "pptable_v1_0.h" @@ -40,6 +40,8 @@ #include "hwmgr.h" #include "smu7_hwmgr.h" +#include "smu7_smumgr.h" +#include "smu_ucode_xfer_vi.h" #include "smu7_powertune.h" #include "smu7_dyn_defaults.h" #include "smu7_thermal.h" @@ -88,6 +90,8 @@ enum DPM_EVENT_SRC { }; static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); +static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask); static struct smu7_power_state *cast_phw_smu7_power_state( struct pp_hw_power_state *hw_ps) @@ -994,7 +998,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr) SWRST_COMMAND_1, RESETLC, 0x0); if (smu7_enable_sclk_mclk_dpm(hwmgr)) { - printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!"); + pr_err("Failed to enable Sclk DPM and Mclk DPM!"); return -EINVAL; } @@ -1079,7 +1083,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) switch (sources) { default: - printk(KERN_ERR "Unknown throttling event sources."); + pr_err("Unknown throttling event sources."); /* fall through */ case 0: protection = false; @@ -1292,6 +1296,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((tmp_result == 0), "Failed to disable SMC CAC!", result = tmp_result); + tmp_result = smu7_disable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable DIDT!", result = tmp_result); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -1499,7 +1507,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) data->vddcgfx_leakage.count++; } } else { - printk("Error retrieving EVV voltage value!\n"); + pr_info("Error retrieving EVV voltage value!\n"); } } } else { @@ -1527,7 +1535,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) if (vddc >= 2000 || vddc == 0) return -EINVAL; } else { - printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); + pr_warning("failed to retrieving EVV voltage!\n"); continue; } @@ -1567,7 +1575,7 @@ static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, } if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage 
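The one-character smu7_clockpowergating.h change above is a real bug fix: the guard tested by #ifndef and the macro defined on the next line must be spelled identically. The corrected shape:

```c
/* With the stray extra underscore the header defined one symbol
 * but tested another, so the guard never prevented double
 * inclusion. */
#ifndef _SMU7_CLOCK_POWER_GATING_H_
#define _SMU7_CLOCK_POWER_GATING_H_

/* ... declarations ... */

#endif /* _SMU7_CLOCK_POWER_GATING_H_ */
```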
value looks like a Leakage ID but it's not patched \n"); + pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); } /** @@ -2032,7 +2040,7 @@ static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, } if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) - printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n"); + pr_err("Voltage value looks like a Leakage ID but it's not patched \n"); } @@ -2267,6 +2275,21 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) return 0; } +static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + } + pp_smu7_thermal_fini(hwmgr); + if (NULL != hwmgr->backend) { + kfree(hwmgr->backend); + hwmgr->backend = NULL; + } + + return 0; +} + static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data; @@ -2277,6 +2300,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) return -ENOMEM; hwmgr->backend = data; + pp_smu7_thermal_initialize(hwmgr); smu7_patch_voltage_workaround(hwmgr); smu7_init_dpm_defaults(hwmgr); @@ -2285,7 +2309,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) result = smu7_get_evv_voltages(hwmgr); if (result) { - printk("Get EVV Voltage Failed. Abort Driver loading!\n"); + pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); return -EINVAL; } @@ -2334,7 +2358,7 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) smu7_thermal_parameter_init(hwmgr); } else { /* Ignore return value in here, we are cleaning up a mess. */ - phm_hwmgr_backend_fini(hwmgr); + smu7_hwmgr_backend_fini(hwmgr); } return 0; @@ -2466,36 +2490,155 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) } return 0; +} +static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, + uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) +{ + uint32_t percentage; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; + int32_t tmp_mclk; + int32_t tmp_sclk; + int32_t count; + + if (golden_dpm_table->mclk_table.count < 1) + return -EINVAL; + + percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; + + if (golden_dpm_table->mclk_table.count == 1) { + percentage = 70; + tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; + *mclk_mask = golden_dpm_table->mclk_table.count - 1; + } else { + tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; + *mclk_mask = golden_dpm_table->mclk_table.count - 2; + } + + tmp_sclk = tmp_mclk * percentage / 100; + + if (hwmgr->pp_table_version == PP_TABLE_V0) { + for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; + count >= 0; count--) { + if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { + tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; + *sclk_mask = count; + break; + } + } + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + *sclk_mask = 0; + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; + } else if (hwmgr->pp_table_version == PP_TABLE_V1) { + struct 
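The dependency-table walk in smu7_get_profiling_clk() above snaps the computed clock to the highest table entry it still covers, scanning from the top. A reduced sketch of that loop, with a plain array standing in for the vdd_dep_on_sclk entries:

```c
#include <stdint.h>

/* Walk from the highest level down; the first entry the wanted
 * clock can cover wins. If nothing fits, fall back to level 0,
 * as the hunk above does via its post-loop check. */
static int pick_sclk_level(const uint32_t *clk, int count, uint32_t want)
{
        int i;

        for (i = count - 1; i >= 0; i--)
                if (want >= clk[i])
                        return i;

        return 0;
}
```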
phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { + if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { + tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; + *sclk_mask = count; + break; + } + } + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + *sclk_mask = 0; + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; + } + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + *mclk_mask = 0; + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + *mclk_mask = golden_dpm_table->mclk_table.count - 1; + + *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; + return 0; } + static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { int ret = 0; + uint32_t sclk_mask = 0; + uint32_t mclk_mask = 0; + uint32_t pcie_mask = 0; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + + if (level == hwmgr->dpm_level) + return ret; + + if (!(hwmgr->dpm_level & profile_mode_mask)) { + /* enter profile mode, save current level, disable gfx cg*/ + if (level & profile_mode_mask) { + hwmgr->saved_dpm_level = hwmgr->dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } + } else { + /* exit profile mode, restore level, enable gfx cg*/ + if (!(level & profile_mode_mask)) { + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) + level = hwmgr->saved_dpm_level; + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + } + } switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: ret = smu7_force_dpm_highest(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_LOW: ret = smu7_force_dpm_lowest(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; break; case AMD_DPM_FORCED_LEVEL_AUTO: ret = smu7_unforce_dpm_levels(hwmgr); if (ret) return ret; + hwmgr->dpm_level = level; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); + if (ret) + return ret; + hwmgr->dpm_level = level; + smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); + smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); + smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + hwmgr->dpm_level = level; break; + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: break; } - hwmgr->dpm_level = level; + if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH)) + smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + else + smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); - return ret; + return 0; } static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) @@ -2898,11 +3041,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + pr_err("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if (dep_mclk_table->entries[0].vddci != 
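The smu7_force_dpm_level() rework above treats the forced levels as a bitmask: profile_mode_mask is OR-ed from four of them, and membership tests become a single AND. That only works if each level is a distinct bit; the values below follow that style but are illustrative, since the real enum lives in a header not shown in this diff:

```c
#include <stdio.h>

enum dpm_level {
        LVL_AUTO             = 1 << 0,
        LVL_LOW              = 1 << 1,
        LVL_HIGH             = 1 << 2,
        LVL_MANUAL           = 1 << 3,
        LVL_PROFILE_STANDARD = 1 << 4,
        LVL_PROFILE_MIN_SCLK = 1 << 5,
        LVL_PROFILE_MIN_MCLK = 1 << 6,
        LVL_PROFILE_PEAK     = 1 << 7,
};

#define PROFILE_MODE_MASK (LVL_PROFILE_STANDARD | LVL_PROFILE_MIN_SCLK | \
                           LVL_PROFILE_MIN_MCLK | LVL_PROFILE_PEAK)

int main(void)
{
        enum dpm_level level = LVL_PROFILE_PEAK;

        /* one AND decides whether to ungate gfx and force levels */
        if (level & PROFILE_MODE_MASK)
                printf("profiling mode: force clocks with 1 << mask\n");
        return 0;
}
```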
data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + pr_err("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } @@ -3046,11 +3189,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { if (dep_mclk_table->entries[0].clk != data->vbios_boot_state.mclk_bootup_value) - printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + pr_err("Single MCLK entry VDDCI/MCLK dependency table " "does not match VBIOS boot MCLK level"); if (dep_mclk_table->entries[0].v != data->vbios_boot_state.vddci_bootup_value) - printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + pr_err("Single VDDCI entry VDDCI/MCLK dependency table " "does not match VBIOS boot VDDCI level"); } @@ -3590,9 +3733,9 @@ static int smu7_notify_link_speed_change_after_state_change( if (acpi_pcie_perf_request(hwmgr->device, request, false)) { if (PP_PCIEGen2 == target_link_speed) - printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); else - printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); } } @@ -4029,7 +4172,9 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO | + AMD_DPM_FORCED_LEVEL_LOW | + AMD_DPM_FORCED_LEVEL_HIGH)) return -EINVAL; switch (type) { @@ -4324,9 +4469,35 @@ static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type return 0; } +static int smu7_request_firmware(struct pp_hwmgr *hwmgr) +{ + int ret; + struct cgs_firmware_info info = {0}; + + ret = cgs_get_firmware_info(hwmgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), + &info); + if (ret || !info.kptr) + return -EINVAL; + + return 0; +} + +static int smu7_release_firmware(struct pp_hwmgr *hwmgr) +{ + int ret; + + ret = cgs_rel_firmware(hwmgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU)); + if (ret) + return -EINVAL; + + return 0; +} + static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .backend_init = &smu7_hwmgr_backend_init, - .backend_fini = &phm_hwmgr_backend_fini, + .backend_fini = &smu7_hwmgr_backend_fini, .asic_setup = &smu7_setup_asic_task, .dynamic_state_management_enable = &smu7_enable_dpm_tasks, .apply_state_adjust_rules = smu7_apply_state_adjust_rules, @@ -4371,6 +4542,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_clock_by_type = smu7_get_clock_by_type, .read_sensor = smu7_read_sensor, .dynamic_state_management_disable = smu7_disable_dpm_tasks, + .request_firmware = smu7_request_firmware, + .release_firmware = smu7_release_firmware, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, @@ -4390,7 +4563,7 @@ uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, return i; } -int smu7_hwmgr_init(struct pp_hwmgr *hwmgr) +int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) { int ret = 0; @@ -4400,7 +4573,6 @@ int smu7_hwmgr_init(struct pp_hwmgr *hwmgr) else if (hwmgr->pp_table_version == PP_TABLE_V1) hwmgr->pptable_func = &pptable_v1_0_funcs; - pp_smu7_thermal_initialize(hwmgr); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 26477f0f09dc..3341c0fbd069 100644 --- 
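The new smu7_request_firmware() above folds two failure modes into one: a failed lookup and a successful lookup with no mapped image both become -EINVAL. A sketch of that check with the cgs types reduced to a plain struct; get_firmware_info() is a stand-in, not a real API:

```c
#include <stddef.h>

struct fw_info {
        void *kptr;     /* mapped ucode image, if any */
};

static int get_firmware_info(struct fw_info *info)
{
        static char image[4];   /* pretend ucode blob */

        info->kptr = image;
        return 0;
}

/* Either the lookup failed or it returned no image: the caller
 * cannot proceed in both cases, so report them identically. */
static int request_firmware_sketch(void)
{
        struct fw_info info = { 0 };
        int ret = get_firmware_info(&info);

        if (ret || info.kptr == NULL)
                return -1;
        return 0;
}
```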
a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -20,17 +20,19 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ +#include "pp_debug.h" #include "hwmgr.h" #include "smumgr.h" #include "smu7_hwmgr.h" #include "smu7_powertune.h" -#include "pp_debug.h" #include "smu7_common.h" #define VOLTAGE_SCALE 4 static uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; +static uint32_t Polaris11_DIDTBlock_Info = SQ_PCC_MASK | TCP_IR_MASK | TD_PCC_MASK; + static const struct gpu_pt_config_reg GCCACConfig_Polaris10[] = { /* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- * Offset Mask Shift Value Type @@ -261,9 +263,9 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, @@ -271,12 +273,12 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_TUNING_CTRL, 
DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, @@ -284,8 +286,8 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, - { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, @@ -373,55 +375,160 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11[] = { { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } +}; + +static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, 
GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { 
ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, 
DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, { 0xFFFFFFFF } }; static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { - uint32_t en = enable ? 
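The new DIDTConfig_Polaris12 table above ends with a lone `{ 0xFFFFFFFF }` entry; zero-filled trailing fields make that the stop marker for the programming loop. A sketch of how such a table is applied, with the cgs indirect-register accessors reduced to an array:

```c
#include <stdint.h>

struct cfg_reg {
        uint32_t offset, mask, shift, value;
};

static uint32_t regs[256];      /* fake indirect register file */

static uint32_t read_ind(uint32_t off)              { return regs[off % 256]; }
static void     write_ind(uint32_t off, uint32_t v) { regs[off % 256] = v; }

/* Walk the 0xFFFFFFFF-terminated table and read-modify-write each
 * field: clear the masked bits, then OR in the shifted value. */
static void program_pt_config_registers(const struct cfg_reg *t)
{
        for (; t->offset != 0xFFFFFFFF; t++) {
                uint32_t data = read_ind(t->offset);

                data &= ~t->mask;
                data |= (t->value << t->shift) & t->mask;
                write_ind(t->offset, data);
        }
}
```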
1 : 0; + uint32_t block_en = 0; int32_t result = 0; + uint32_t didt_block; uint32_t data; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); - DIDTBlock_Info &= ~SQ_Enable_MASK; - DIDTBlock_Info |= en << SQ_Enable_SHIFT; - } + if (hwmgr->chip_id == CHIP_POLARIS11) + didt_block = Polaris11_DIDTBlock_Info; + else + didt_block = DIDTBlock_Info; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); - DIDTBlock_Info &= ~DB_Enable_MASK; - DIDTBlock_Info |= en << DB_Enable_SHIFT; - } + block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); - DIDTBlock_Info &= ~TD_Enable_MASK; - DIDTBlock_Info |= en << TD_Enable_SHIFT; - } + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + didt_block &= ~SQ_Enable_MASK; + didt_block |= block_en << SQ_Enable_SHIFT; + + block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0; + + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + didt_block &= ~DB_Enable_MASK; + didt_block |= block_en << DB_Enable_SHIFT; + + block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0; + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + didt_block &= ~TD_Enable_MASK; + didt_block |= block_en << TD_Enable_SHIFT; + + block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? 
en : 0; + + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + didt_block &= ~TCP_Enable_MASK; + didt_block |= block_en << TCP_Enable_SHIFT; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); - DIDTBlock_Info &= ~TCP_Enable_MASK; - DIDTBlock_Info |= en << TCP_Enable_SHIFT; - } if (enable) - result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block); return result; } @@ -498,7 +605,6 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); - if (result == 0) num_se = sys_info.value; @@ -507,7 +613,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - /* TO DO Pre DIDT disable clock gating */ + cgs_enter_safe_mode(hwmgr->device, true); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { @@ -526,6 +632,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_POLARIS12) { + result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); } } cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); @@ -533,7 +644,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) result = smu7_enable_didt(hwmgr, true); PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); - /* TO DO Post DIDT enable clock gating */ + if (hwmgr->chip_id == CHIP_POLARIS11) { + result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDpmDidt)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to enable DPM DIDT.", return result); + } + cgs_enter_safe_mode(hwmgr->device, false); } return 0; @@ -547,11 +664,20 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { - /* TO DO Pre DIDT disable clock gating */ + + cgs_enter_safe_mode(hwmgr->device, true); result = smu7_enable_didt(hwmgr, false); - PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock 
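The smu7_enable_didt() rework above changes the flow from "touch a block only when its cap is set" to "always program the enable bit, computed from the cap", so a block whose cap is off gets explicitly switched off instead of keeping stale state; the accumulated per-block bits then go to the SMC in didt_block. A sketch of the per-block computation for one register:

```c
#include <stdbool.h>
#include <stdint.h>

static uint32_t didt_sq_ctrl0;  /* stand-in for ixDIDT_SQ_CTRL0 */

#define DIDT_CTRL_EN_MASK  0x00000001u
#define DIDT_CTRL_EN_SHIFT 0

/* block_en is 1 only when both the platform cap and the global
 * enable are set; the write happens unconditionally either way. */
static void set_didt_block(bool cap_set, bool enable)
{
        uint32_t block_en = (cap_set && enable) ? 1 : 0;

        didt_sq_ctrl0 &= ~DIDT_CTRL_EN_MASK;
        didt_sq_ctrl0 |= (block_en << DIDT_CTRL_EN_SHIFT) & DIDT_CTRL_EN_MASK;
}
```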
gating failed.", return result); - /* TO DO Post DIDT enable clock gating */ + PP_ASSERT_WITH_CODE((result == 0), + "Post DIDT enable clock gating failed.", + return result); + if (hwmgr->chip_id == CHIP_POLARIS11) { + result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDpmDidt)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to disable DPM DIDT.", return result); + } + cgs_enter_safe_mode(hwmgr->device, false); } return 0; @@ -651,7 +777,7 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) POWERCONTAINMENT_FEATURE_PkgPwrLimit; if (smu7_set_power_limit(hwmgr, default_limit)) - printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + pr_err("Failed to set Default Power Limit in SMC!"); } } } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 29d0319b22e6..436ca5ce8248 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -506,18 +506,18 @@ static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr, static const struct phm_master_table_item phm_thermal_start_thermal_controller_master_list[] = { - {NULL, tf_smu7_thermal_initialize}, - {NULL, tf_smu7_thermal_set_temperature_range}, - {NULL, tf_smu7_thermal_enable_alert}, - {NULL, smum_thermal_avfs_enable}, + { .tableFunction = tf_smu7_thermal_initialize }, + { .tableFunction = tf_smu7_thermal_set_temperature_range }, + { .tableFunction = tf_smu7_thermal_enable_alert }, + { .tableFunction = smum_thermal_avfs_enable }, /* We should restrict performance levels to low before we halt the SMC. * On the other hand we are still in boot state when we do this * so it would be pointless. * If this assumption changes we have to revisit this table. */ - {NULL, smum_thermal_setup_fan_table}, - {NULL, tf_smu7_thermal_start_smc_fan_control}, - {NULL, NULL} + { .tableFunction = smum_thermal_setup_fan_table }, + { .tableFunction = tf_smu7_thermal_start_smc_fan_control }, + { } }; static const struct phm_master_table_header @@ -529,10 +529,10 @@ phm_thermal_start_thermal_controller_master = { static const struct phm_master_table_item phm_thermal_set_temperature_range_master_list[] = { - {NULL, tf_smu7_thermal_disable_alert}, - {NULL, tf_smu7_thermal_set_temperature_range}, - {NULL, tf_smu7_thermal_enable_alert}, - {NULL, NULL} + { .tableFunction = tf_smu7_thermal_disable_alert }, + { .tableFunction = tf_smu7_thermal_set_temperature_range }, + { .tableFunction = tf_smu7_thermal_enable_alert }, + { } }; static const struct phm_master_table_header @@ -575,3 +575,9 @@ int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr) return result; } +void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr) +{ + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller)); + return; +}
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h index 6face973be43..2ed774db42c7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h @@ -47,6 +47,7 @@ extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr); +extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr); extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 3a883e6c601a..6dd5f0e9ef87 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -29,7 +29,10 @@ #include "amd_shared.h" #include "cgs_common.h" -extern int amdgpu_dpm; +extern const struct amd_ip_funcs pp_ip_funcs; +extern const struct amd_powerplay_funcs pp_dpm_funcs; + +#define PP_DPM_DISABLED 0xCCCC enum amd_pp_sensors { AMDGPU_PP_SENSOR_GFX_SCLK = 0, @@ -135,17 +138,12 @@ enum amd_pp_event { AMD_PP_EVENT_MAX }; -enum amd_dpm_forced_level { - AMD_DPM_FORCED_LEVEL_AUTO = 0, - AMD_DPM_FORCED_LEVEL_LOW = 1, - AMD_DPM_FORCED_LEVEL_HIGH = 2, - AMD_DPM_FORCED_LEVEL_MANUAL = 3, -}; - struct amd_pp_init { struct cgs_device *device; uint32_t chip_family; uint32_t chip_id; + bool pm_en; + uint32_t feature_mask; }; enum amd_pp_display_config_type{ @@ -371,10 +369,10 @@ struct amd_powerplay { const struct amd_powerplay_funcs *pp_funcs; }; -int amd_powerplay_init(struct amd_pp_init *pp_init, - struct amd_powerplay *amd_pp); +int amd_powerplay_create(struct amd_pp_init *pp_init, + void **handle); -int amd_powerplay_fini(void *handle); +int amd_powerplay_destroy(void *handle); int amd_powerplay_reset(void *handle); diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h index d63ef83b2628..7bd8a7e57080 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h @@ -119,7 +119,6 @@ struct pp_eventmgr { void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr); }; -int eventmgr_init(struct pp_instance *handle); -int eventmgr_fini(struct pp_eventmgr *eventmgr); +int eventmgr_early_init(struct pp_instance *handle); #endif /* _EVENTMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 6cdb7cbf515e..7275a29293eb 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -38,8 +38,6 @@ struct pp_hwmgr; struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; -extern unsigned amdgpu_pp_feature_mask; - #define VOLTAGE_SCALE 4 uint8_t convert_to_vid(uint16_t vddc); @@ -358,6 +356,8 @@ struct pp_hwmgr_func { int (*get_mclk_od)(struct pp_hwmgr *hwmgr); int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, int32_t *value); + int (*request_firmware)(struct pp_hwmgr *hwmgr); + int (*release_firmware)(struct pp_hwmgr *hwmgr); }; struct pp_table_func 
{ @@ -612,6 +612,7 @@ struct pp_hwmgr { uint32_t num_vce_state_tables; enum amd_dpm_forced_level dpm_level; + enum amd_dpm_forced_level saved_dpm_level; bool block_hw_access; struct phm_gfx_arbiter gfx_arbiter; struct phm_acp_arbiter acp_arbiter; @@ -651,19 +652,12 @@ struct pp_hwmgr { uint32_t feature_mask; }; - -extern int hwmgr_init(struct amd_pp_init *pp_init, - struct pp_instance *handle); - -extern int hwmgr_fini(struct pp_hwmgr *hwmgr); - -extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); - +extern int hwmgr_early_init(struct pp_instance *handle); +extern int hwmgr_hw_init(struct pp_instance *handle); +extern int hwmgr_hw_fini(struct pp_instance *handle); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); - - extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, uint32_t index, @@ -692,11 +686,10 @@ extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t virtual_voltage_id, int32_t *sclk); extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); -extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); -extern int smu7_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t id, uint16_t *voltage); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h index bfdbec10cdd5..072880130cfb 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h @@ -24,6 +24,12 @@ #ifndef PP_DEBUG_H #define PP_DEBUG_H +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt + #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -31,7 +37,7 @@ #define PP_ASSERT_WITH_CODE(cond, msg, code) \ do { \ if (!(cond)) { \ - printk("%s\n", msg); \ + pr_warning("%s\n", msg); \ code; \ } \ } while (0) @@ -39,7 +45,7 @@ #define PP_DBG_LOG(fmt, ...) 
\ do { \ - if(0)printk(KERN_INFO "[ pp_dbg ] " fmt, ##__VA_ARGS__); \ + pr_debug(fmt, ##__VA_ARGS__); \ } while (0) diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h index 4d8ed1f33de4..ab8494fb5c6b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h @@ -31,6 +31,11 @@ struct pp_instance { uint32_t pp_valid; + uint32_t chip_family; + uint32_t chip_id; + bool pm_en; + uint32_t feature_mask; + void *device; struct pp_smumgr *smu_mgr; struct pp_hwmgr *hwmgr; struct pp_eventmgr *eventmgr; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h index bce00096d80d..fbc504c70b8b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h @@ -394,6 +394,9 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306) +#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309) +#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A) + #define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600) #define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601) #define PPSMC_MSG_SetAddress ((uint16_t) 0x800) diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 2139072065cc..9b6531bd6350 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -33,6 +33,12 @@ struct pp_hwmgr; #define smu_lower_32_bits(n) ((uint32_t)(n)) #define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16)) +extern const struct pp_smumgr_func cz_smu_funcs; +extern const struct pp_smumgr_func iceland_smu_funcs; +extern const struct pp_smumgr_func tonga_smu_funcs; +extern const struct pp_smumgr_func fiji_smu_funcs; +extern const struct pp_smumgr_func polaris10_smu_funcs; + enum AVFS_BTC_STATUS { AVFS_BTC_BOOT = 0, AVFS_BTC_BOOT_STARTEDSMU, @@ -133,11 +139,7 @@ struct pp_smumgr { const struct pp_smumgr_func *smumgr_funcs; }; - -extern int smum_init(struct amd_pp_init *pp_init, - struct pp_instance *handle); - -extern int smum_fini(struct pp_smumgr *smumgr); +extern int smum_early_init(struct pp_instance *handle); extern int smum_get_argument(struct pp_smumgr *smumgr); @@ -172,13 +174,6 @@ extern int smu_allocate_memory(void *device, uint32_t size, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); - -extern int cz_smum_init(struct pp_smumgr *smumgr); -extern int iceland_smum_init(struct pp_smumgr *smumgr); -extern int tonga_smum_init(struct pp_smumgr *smumgr); -extern int fiji_smum_init(struct pp_smumgr *smumgr); -extern int polaris10_smum_init(struct pp_smumgr *smumgr); - extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 5a44485526d2..1f6744a443d4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -70,7 +70,7 @@ static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { - printk(KERN_ERR "[ powerplay ] cz_send_msg_to_smc_async failed\n"); + pr_err("cz_send_msg_to_smc_async failed\n"); return result; } @@ -100,12 +100,12 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, return -EINVAL; if (0 != (3 & 
smc_address)) { - printk(KERN_ERR "[ powerplay ] SMC address must be 4 byte aligned\n"); + pr_err("SMC address must be 4 byte aligned\n"); return -EINVAL; } if (limit <= (smc_address + 3)) { - printk(KERN_ERR "[ powerplay ] SMC address beyond the SMC RAM area\n"); + pr_err("SMC address beyond the SMC RAM area\n"); return -EINVAL; } @@ -141,42 +141,6 @@ static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, return cz_send_msg_to_smc(smumgr, msg); } -static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) -{ - struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); - uint32_t smc_address; - - if (!smumgr->reload_fw) { - printk(KERN_INFO "[ powerplay ] skip reloading...\n"); - return 0; - } - - smc_address = SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - - cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); - - cz_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DriverDramAddrHi, - cz_smu->toc_buffer.mc_addr_high); - - cz_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_DriverDramAddrLo, - cz_smu->toc_buffer.mc_addr_low); - - cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs); - - cz_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_ExecuteJob, - cz_smu->toc_entry_aram); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, - cz_smu->toc_entry_power_profiling_index); - - return cz_send_msg_to_smc_with_parameter(smumgr, - PPSMC_MSG_ExecuteJob, - cz_smu->toc_entry_initialize_index); -} - static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t firmware) { @@ -198,7 +162,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, } if (i >= smumgr->usec_timeout) { - printk(KERN_ERR "[ powerplay ] SMU check loaded firmware failed.\n"); + pr_err("SMU check loaded firmware failed.\n"); return -EINVAL; } @@ -250,34 +214,6 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr) return 0; } -static int cz_start_smu(struct pp_smumgr *smumgr) -{ - int ret = 0; - uint32_t fw_to_check = UCODE_ID_RLC_G_MASK | - UCODE_ID_SDMA0_MASK | - UCODE_ID_SDMA1_MASK | - UCODE_ID_CP_CE_MASK | - UCODE_ID_CP_ME_MASK | - UCODE_ID_CP_PFP_MASK | - UCODE_ID_CP_MEC_JT1_MASK | - UCODE_ID_CP_MEC_JT2_MASK; - - if (smumgr->chip_id == CHIP_STONEY) - fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - - ret = cz_request_smu_load_fw(smumgr); - if (ret) - printk(KERN_ERR "[ powerplay] SMU firmware load failed\n"); - - cz_check_fw_load_finish(smumgr, fw_to_check); - - ret = cz_load_mec_firmware(smumgr); - if (ret) - printk(KERN_ERR "[ powerplay ] Mec Firmware load failed\n"); - - return ret; -} - static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, enum cz_scratch_entry firmware_enum) { @@ -406,7 +342,7 @@ static int cz_smu_populate_single_scratch_task( break; if (i >= cz_smu->scratch_buffer_length) { - printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n"); + pr_err("Invalid Firmware Type\n"); return -EINVAL; } @@ -443,7 +379,7 @@ static int cz_smu_populate_single_ucode_load_task( break; if (i >= cz_smu->driver_buffer_length) { - printk(KERN_ERR "[ powerplay ] Invalid Firmware Type\n"); + pr_err("Invalid Firmware Type\n"); return -EINVAL; } @@ -729,11 +665,87 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) return 0; } +static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) +{ + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); + uint32_t smc_address; + + if (!smumgr->reload_fw) { + pr_info("skip reloading...\n"); + return 0; + } + + 
cz_smu_populate_firmware_entries(smumgr); + + cz_smu_construct_toc(smumgr); + + smc_address = SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); + + cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DriverDramAddrHi, + cz_smu->toc_buffer.mc_addr_high); + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DriverDramAddrLo, + cz_smu->toc_buffer.mc_addr_low); + + cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs); + + cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_ExecuteJob, + cz_smu->toc_entry_aram); + cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_smu->toc_entry_power_profiling_index); + + return cz_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_ExecuteJob, + cz_smu->toc_entry_initialize_index); +} + +static int cz_start_smu(struct pp_smumgr *smumgr) +{ + int ret = 0; + uint32_t fw_to_check = 0; + + fw_to_check = UCODE_ID_RLC_G_MASK | + UCODE_ID_SDMA0_MASK | + UCODE_ID_SDMA1_MASK | + UCODE_ID_CP_CE_MASK | + UCODE_ID_CP_ME_MASK | + UCODE_ID_CP_PFP_MASK | + UCODE_ID_CP_MEC_JT1_MASK | + UCODE_ID_CP_MEC_JT2_MASK; + + if (smumgr->chip_id == CHIP_STONEY) + fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); + + ret = cz_request_smu_load_fw(smumgr); + if (ret) + pr_err("SMU firmware load failed\n"); + + cz_check_fw_load_finish(smumgr, fw_to_check); + + ret = cz_load_mec_firmware(smumgr); + if (ret) + pr_err("Mec Firmware load failed\n"); + + return ret; +} + static int cz_smu_init(struct pp_smumgr *smumgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; uint64_t mc_addr = 0; int ret = 0; + struct cz_smumgr *cz_smu; + + cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL); + if (cz_smu == NULL) + return -ENOMEM; + + smumgr->backend = cz_smu; cz_smu->toc_buffer.data_size = 4096; cz_smu->smu_buffer.data_size = @@ -769,12 +781,11 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - cz_smu_populate_firmware_entries(smumgr); if (0 != cz_smu_populate_single_scratch_entry(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, UCODE_ID_RLC_SCRATCH_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { - printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + pr_err("Error when Populate Firmware Entry.\n"); return -1; } @@ -782,14 +793,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { - printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + pr_err("Error when Populate Firmware Entry.\n"); return -1; } if (0 != cz_smu_populate_single_scratch_entry(smumgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { - printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + pr_err("Error when Populate Firmware Entry.\n"); return -1; } @@ -797,7 +808,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, sizeof(struct SMU8_MultimediaPowerLogData), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { - printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + pr_err("Error when Populate Firmware Entry.\n"); return -1; } @@ -805,10 +816,9 @@ static int cz_smu_init(struct 
pp_smumgr *smumgr) CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, sizeof(struct SMU8_Fusion_ClkTable), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { - printk(KERN_ERR "[ powerplay ] Error when Populate Firmware Entry.\n"); + pr_err("Error when Populate Firmware Entry.\n"); return -1; } - cz_smu_construct_toc(smumgr); return 0; } @@ -827,13 +837,12 @@ static int cz_smu_fini(struct pp_smumgr *smumgr) cgs_free_gpu_mem(smumgr->device, cz_smu->smu_buffer.handle); kfree(cz_smu); - kfree(smumgr); } return 0; } -static const struct pp_smumgr_func cz_smu_funcs = { +const struct pp_smumgr_func cz_smu_funcs = { .smu_init = cz_smu_init, .smu_fini = cz_smu_fini, .start_smu = cz_start_smu, @@ -847,15 +856,3 @@ static const struct pp_smumgr_func cz_smu_funcs = { .upload_pptable_settings = cz_upload_pptable_settings, }; -int cz_smum_init(struct pp_smumgr *smumgr) -{ - struct cz_smumgr *cz_smu; - - cz_smu = kzalloc(sizeof(struct cz_smumgr), GFP_KERNEL); - if (cz_smu == NULL) - return -ENOMEM; - - smumgr->backend = cz_smu; - smumgr->smumgr_funcs = &cz_smu_funcs; - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h index 883818039248..7c3a290c8957 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h @@ -95,8 +95,4 @@ struct cz_smumgr { struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; }; -struct pp_smumgr; - -extern int cz_smum_init(struct pp_smumgr *smumgr); - #endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 6aeb1d20cc3b..0f7a77b7312e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -21,13 +21,13 @@ * */ +#include "pp_debug.h" #include "fiji_smc.h" #include "smu7_dyn_defaults.h" #include "smu7_hwmgr.h" #include "hardwaremanager.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "atombios.h" #include "fiji_smumgr.h" @@ -2131,7 +2131,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } } - printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); + pr_warning("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2156,7 +2156,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) return SMU73_MAX_LEVELS_MVDD; } - printk(KERN_WARNING "can't get the mac of %x\n", value); + pr_warning("can't get the mac of %x\n", value); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 26eff56b4a99..54b347366b5d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -21,6 +21,7 @@ * */ +#include "pp_debug.h" #include "smumgr.h" #include "smu73.h" #include "smu_ucode_xfer_vi.h" @@ -36,7 +37,6 @@ #include "gca/gfx_8_0_d.h" #include "bif/bif_5_0_d.h" #include "bif/bif_5_0_sh_mask.h" -#include "pp_debug.h" #include "fiji_pwrvirus.h" #include "fiji_smc.h" @@ -179,7 +179,7 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) result = 0; break; default: - printk(KERN_ERR "Table Exit with Invalid Command!"); + pr_err("Table Exit with Invalid Command!"); priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL; result = -1; break; @@ -202,13 +202,13 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) priv->avfs.AvfsBtcStatus = 
AVFS_BTC_COMPLETED_UNSAVED; result = 0; } else { - printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] Attempt" + pr_err("[AVFS][fiji_start_avfs_btc] Attempt" " to Enable AVFS Failed!"); smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs); result = -1; } } else { - printk(KERN_ERR "[AVFS][fiji_start_avfs_btc] " + pr_err("[AVFS][fiji_start_avfs_btc] " "PerformBTC SMU msg failed"); result = -1; } @@ -384,7 +384,7 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) case AVFS_BTC_NOTSUPPORTED: /* Do nothing */ break; default: - printk(KERN_ERR "[AVFS] Something is broken. See log!"); + pr_err("[AVFS] Something is broken. See log!"); break; } return 0; @@ -464,13 +464,20 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) */ static int fiji_smu_init(struct pp_smumgr *smumgr) { - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); int i; + struct fiji_smumgr *fiji_priv = NULL; + + fiji_priv = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); + + if (fiji_priv == NULL) + return -ENOMEM; + + smumgr->backend = fiji_priv; if (smu7_init(smumgr)) return -EINVAL; - priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; + fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT; if (fiji_is_hw_avfs_present(smumgr)) /* AVFS Parameter * 0 - BTC DC disabled, BTC AC disabled @@ -479,18 +486,18 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) * 3 - BTC DC enabled, BTC AC enabled * Default is 0 - BTC DC disabled, BTC AC disabled */ - priv->avfs.AvfsBtcParam = 0; + fiji_priv->avfs.AvfsBtcParam = 0; else - priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; + fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED; for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) - priv->activity_target[i] = 30; + fiji_priv->activity_target[i] = 30; return 0; } -static const struct pp_smumgr_func fiji_smu_funcs = { +const struct pp_smumgr_func fiji_smu_funcs = { .smu_init = &fiji_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &fiji_start_smu, @@ -513,18 +520,3 @@ static const struct pp_smumgr_func fiji_smu_funcs = { .initialize_mc_reg_table = fiji_initialize_mc_reg_table, .is_dpm_running = fiji_is_dpm_running, }; - -int fiji_smum_init(struct pp_smumgr *smumgr) -{ - struct fiji_smumgr *fiji_smu = NULL; - - fiji_smu = kzalloc(sizeof(struct fiji_smumgr), GFP_KERNEL); - - if (fiji_smu == NULL) - return -ENOMEM; - - smumgr->backend = fiji_smu; - smumgr->smumgr_funcs = &fiji_smu_funcs; - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index a24971a33bfd..ad82161df831 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -21,13 +21,13 @@ * */ +#include "pp_debug.h" #include "iceland_smc.h" #include "smu7_dyn_defaults.h" #include "smu7_hwmgr.h" #include "hardwaremanager.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "atombios.h" #include "pppcielanes.h" @@ -1545,7 +1545,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, if (0 != result) { smu_data->smc_state_table.GraphicsBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + pr_err("VBIOS did not find boot engine clock value \ in dependency table. 
Using Graphics DPM level 0!"); result = 0; } @@ -1556,7 +1556,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, if (0 != result) { smu_data->smc_state_table.MemoryBootLevel = 0; - printk(KERN_ERR "[ powerplay ] VBIOS did not find boot engine clock value \ + pr_err("VBIOS did not find boot engine clock value \ in dependency table. Using Memory DPM level 0!"); result = 0; } @@ -2146,7 +2146,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); } } - printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); + pr_warning("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2169,7 +2169,7 @@ uint32_t iceland_get_mac_definition(uint32_t value) return SMU71_MAX_LEVELS_MVDD; } - printk(KERN_WARNING "can't get the mac of %x\n", value); + pr_warning("can't get the mac of %x\n", value); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index eeafefc4acba..0bf2def3b659 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -22,6 +22,7 @@ * Author: Huang Rui <ray.huang@amd.com> * */ +#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -29,7 +30,6 @@ #include "smumgr.h" #include "iceland_smumgr.h" -#include "pp_debug.h" #include "smu_ucode_xfer_vi.h" #include "ppsmc.h" #include "smu/smu_7_1_1_d.h" @@ -176,7 +176,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) return result; if (!smu7_is_smc_ram_running(smumgr)) { - printk("smu not running, upload firmware again \n"); + pr_info("smu not running, upload firmware again \n"); result = iceland_smu_upload_firmware_image(smumgr); if (result) return result; @@ -201,17 +201,25 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) static int iceland_smu_init(struct pp_smumgr *smumgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *iceland_priv = NULL; + + iceland_priv = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); + + if (iceland_priv == NULL) + return -ENOMEM; + + smumgr->backend = iceland_priv; + if (smu7_init(smumgr)) return -EINVAL; for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) - smu_data->activity_target[i] = 30; + iceland_priv->activity_target[i] = 30; return 0; } -static const struct pp_smumgr_func iceland_smu_funcs = { +const struct pp_smumgr_func iceland_smu_funcs = { .smu_init = &iceland_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &iceland_start_smu, @@ -234,17 +242,3 @@ static const struct pp_smumgr_func iceland_smu_funcs = { .is_dpm_running = iceland_is_dpm_running, }; -int iceland_smum_init(struct pp_smumgr *smumgr) -{ - struct iceland_smumgr *iceland_smu = NULL; - - iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); - - if (iceland_smu == NULL) - return -ENOMEM; - - smumgr->backend = iceland_smu; - smumgr->smumgr_funcs = &iceland_smu_funcs; - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 5190e821200c..0e26900e459e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -21,13 +21,13 @@ * */ +#include "pp_debug.h" #include "polaris10_smc.h" #include "smu7_dyn_defaults.h" #include "smu7_hwmgr.h" #include 
"hardwaremanager.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "atombios.h" #include "polaris10_smumgr.h" @@ -2180,7 +2180,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } } - printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); + pr_warning("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2207,7 +2207,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; } - printk(KERN_WARNING "can't get the mac of %x\n", value); + pr_warning("can't get the mac of %x\n", value); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index f38a68747df0..ce20ae2e520e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -21,6 +21,7 @@ * */ +#include "pp_debug.h" #include "smumgr.h" #include "smu74.h" #include "smu_ucode_xfer_vi.h" @@ -36,7 +37,6 @@ #include "bif/bif_5_0_sh_mask.h" #include "polaris10_pwrvirus.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "polaris10_smc.h" #include "smu7_ppsmc.h" @@ -84,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) break; default: - printk("Table Exit with Invalid Command!"); + pr_info("Table Exit with Invalid Command!"); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; result = -1; break; @@ -102,7 +102,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) if (0 != smu_data->avfs.avfs_btc_param) { if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { - printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); + pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } } @@ -189,7 +189,7 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) return -1); if (smu_data->avfs.avfs_btc_param > 1) { - printk("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); + pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", @@ -208,7 +208,7 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) break; default: - printk("[AVFS] Something is broken. See log!"); + pr_info("[AVFS] Something is broken. See log!"); break; } @@ -328,6 +328,7 @@ static int polaris10_start_smu(struct pp_smumgr *smumgr) /* If failed, try with different security Key. 
*/ if (result != 0) { smu_data->smu7_data.security_hard_key ^= 1; + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); result = polaris10_start_smu_in_protection_mode(smumgr); } } @@ -363,9 +364,15 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) static int polaris10_smu_init(struct pp_smumgr *smumgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data; int i; + smu_data = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); + if (smu_data == NULL) + return -ENOMEM; + + smumgr->backend = smu_data; + if (smu7_init(smumgr)) return -EINVAL; @@ -380,7 +387,7 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) return 0; } -static const struct pp_smumgr_func polaris10_smu_funcs = { +const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, .smu_fini = smu7_smu_fini, .start_smu = polaris10_start_smu, @@ -403,18 +410,3 @@ static const struct pp_smumgr_func polaris10_smu_funcs = { .get_mac_definition = polaris10_get_mac_definition, .is_dpm_running = polaris10_is_dpm_running, }; - -int polaris10_smum_init(struct pp_smumgr *smumgr) -{ - struct polaris10_smumgr *polaris10_smu = NULL; - - polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL); - - if (polaris10_smu == NULL) - return -EINVAL; - - smumgr->backend = polaris10_smu; - smumgr->smumgr_funcs = &polaris10_smu_funcs; - - return 0; -} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index f49b5487b951..6749fbe26c74 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -22,12 +22,12 @@ */ +#include "pp_debug.h" #include "smumgr.h" #include "smu_ucode_xfer_vi.h" #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "smu7_ppsmc.h" #include "smu7_smumgr.h" @@ -175,7 +175,7 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) - printk("\n failed to send pre message %x ret is %d \n", msg, ret); + pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); @@ -184,7 +184,7 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) - printk("\n failed to send message %x ret is %d \n", msg, ret); + pr_info("\n failed to send message %x ret is %d \n", msg, ret); return 0; } @@ -225,7 +225,7 @@ int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Message.\n"); + pr_info("Failed to send Message.\n"); return 0; } @@ -347,7 +347,7 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) result = UCODE_ID_RLC_G_MASK; break; default: - printk("UCode type is out of range! \n"); + pr_info("UCode type is out of range! 
\n"); result = 0; } @@ -396,7 +396,7 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) struct SMU_DRAMData_TOC *toc; if (!smumgr->reload_fw) { - printk(KERN_INFO "[ powerplay ] skip reloading...\n"); + pr_info("skip reloading...\n"); return 0; } @@ -474,7 +474,7 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) - printk(KERN_ERR "Fail to Request SMU Load uCode"); + pr_err("Fail to Request SMU Load uCode"); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index e5812aa456f3..60c36928284c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -22,6 +22,7 @@ */ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/slab.h> #include <drm/amdgpu_drm.h> #include "pp_instance.h" @@ -29,43 +30,55 @@ #include "cgs_common.h" #include "linux/delay.h" +MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); -int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) + +int smum_early_init(struct pp_instance *handle) { struct pp_smumgr *smumgr; - if ((handle == NULL) || (pp_init == NULL)) + if (handle == NULL) return -EINVAL; smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL); if (smumgr == NULL) return -ENOMEM; - smumgr->device = pp_init->device; - smumgr->chip_family = pp_init->chip_family; - smumgr->chip_id = pp_init->chip_id; + smumgr->device = handle->device; + smumgr->chip_family = handle->chip_family; + smumgr->chip_id = handle->chip_id; smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; smumgr->reload_fw = 1; handle->smu_mgr = smumgr; switch (smumgr->chip_family) { case AMDGPU_FAMILY_CZ: - cz_smum_init(smumgr); + smumgr->smumgr_funcs = &cz_smu_funcs; break; case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { case CHIP_TOPAZ: - iceland_smum_init(smumgr); + smumgr->smumgr_funcs = &iceland_smu_funcs; break; case CHIP_TONGA: - tonga_smum_init(smumgr); + smumgr->smumgr_funcs = &tonga_smu_funcs; break; case CHIP_FIJI: - fiji_smum_init(smumgr); + smumgr->smumgr_funcs = &fiji_smu_funcs; break; case CHIP_POLARIS11: case CHIP_POLARIS10: - polaris10_smum_init(smumgr); + case CHIP_POLARIS12: + smumgr->smumgr_funcs = &polaris10_smu_funcs; break; default: return -EINVAL; @@ -79,13 +92,6 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) return 0; } -int smum_fini(struct pp_smumgr *smumgr) -{ - kfree(smumgr->device); - kfree(smumgr); - return 0; -} - int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 2e1493ce1bb5..331b0aba4a13 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -21,13 +21,13 @@ * */ +#include "pp_debug.h" #include "tonga_smc.h" #include "smu7_dyn_defaults.h" 
#include "smu7_hwmgr.h" #include "hardwaremanager.h" #include "ppatomctrl.h" -#include "pp_debug.h" #include "cgs_common.h" #include "atombios.h" #include "tonga_smumgr.h" @@ -656,7 +656,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) } } else { if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask) - printk(KERN_ERR "[ powerplay ] Pcie Dpm Enablemask is 0 !"); + pr_err("Pcie Dpm Enablemask is 0 !"); while (data->dpm_level_enable_mask.pcie_dpm_enable_mask && ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & @@ -1503,7 +1503,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, if (result != 0) { smu_data->smc_state_table.GraphicsBootLevel = 0; - printk(KERN_ERR "[powerplay] VBIOS did not find boot engine " + pr_err("[powerplay] VBIOS did not find boot engine " "clock value in dependency table. " "Using Graphics DPM level 0 !"); result = 0; @@ -1515,7 +1515,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, if (result != 0) { smu_data->smc_state_table.MemoryBootLevel = 0; - printk(KERN_ERR "[powerplay] VBIOS did not find boot " + pr_err("[powerplay] VBIOS did not find boot " "engine clock value in dependency table." "Using Memory DPM level 0 !"); result = 0; @@ -1739,7 +1739,7 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, config = VR_SVI2_PLANE_2; table->VRConfig |= config; } else { - printk(KERN_ERR "[ powerplay ] VDDC and VDDGFX should " + pr_err("VDDC and VDDGFX should " "be both on SVI2 control in splitted mode !\n"); } } else { @@ -1752,7 +1752,7 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, config = VR_SVI2_PLANE_1; table->VRConfig |= config; } else { - printk(KERN_ERR "[ powerplay ] VDDC should be on " + pr_err("VDDC should be on " "SVI2 control in merged mode !\n"); } } @@ -2657,7 +2657,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } } - printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); + pr_warning("can't get the offset of type %x member %x\n", type, member); return 0; } @@ -2681,7 +2681,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) case SMU_MAX_LEVELS_MVDD: return SMU72_MAX_LEVELS_MVDD; } - printk(KERN_WARNING "can't get the mac value %x\n", value); + pr_warning("can't get the mac value %x\n", value); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index eff9a232e72e..a7d55366f2d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -20,6 +20,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* */ +#include "pp_debug.h" #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -27,7 +28,6 @@ #include "smumgr.h" #include "tonga_smumgr.h" -#include "pp_debug.h" #include "smu_ucode_xfer_vi.h" #include "tonga_ppsmc.h" #include "smu/smu_7_1_2_d.h" @@ -84,7 +84,7 @@ static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) /* Check pass/failed indicator */ if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { - printk(KERN_ERR "[ powerplay ] SMU Firmware start failed\n"); + pr_err("SMU Firmware start failed\n"); return -EINVAL; } @@ -169,20 +169,25 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) */ static int tonga_smu_init(struct pp_smumgr *smumgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *tonga_priv = NULL; + int i; + + tonga_priv = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); + if (tonga_priv == NULL) + return -ENOMEM; - int i; + smumgr->backend = tonga_priv; if (smu7_init(smumgr)) return -EINVAL; for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) - smu_data->activity_target[i] = 30; + tonga_priv->activity_target[i] = 30; return 0; } -static const struct pp_smumgr_func tonga_smu_funcs = { +const struct pp_smumgr_func tonga_smu_funcs = { .smu_init = &tonga_smu_init, .smu_fini = &smu7_smu_fini, .start_smu = &tonga_start_smu, @@ -205,18 +210,3 @@ static const struct pp_smumgr_func tonga_smu_funcs = { .initialize_mc_reg_table = tonga_initialize_mc_reg_table, .is_dpm_running = tonga_is_dpm_running, }; - -int tonga_smum_init(struct pp_smumgr *smumgr) -{ - struct tonga_smumgr *tonga_smu = NULL; - - tonga_smu = kzalloc(sizeof(struct tonga_smumgr), GFP_KERNEL); - - if (tonga_smu == NULL) - return -ENOMEM; - - smumgr->backend = tonga_smu; - smumgr->smumgr_funcs = &tonga_smu_funcs; - - return 0; -} diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index e5f4f4a6546d..a2e5b04cdee3 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -255,12 +255,6 @@ static int hdlcd_debugfs_init(struct drm_minor *minor) return drm_debugfs_create_files(hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor); } - -static void hdlcd_debugfs_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(hdlcd_debugfs_list, - ARRAY_SIZE(hdlcd_debugfs_list), minor); -} #endif static const struct file_operations fops = { @@ -303,7 +297,6 @@ static struct drm_driver hdlcd_driver = { .gem_prime_mmap = drm_gem_cma_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = hdlcd_debugfs_init, - .debugfs_cleanup = hdlcd_debugfs_cleanup, #endif .fops = &fops, .name = "hdlcd", diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 32f746e31379..99fb0ab39191 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -22,7 +22,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_of.h> @@ -256,6 +255,60 @@ static const struct of_device_id malidp_drm_of_match[] = { }; MODULE_DEVICE_TABLE(of, malidp_drm_of_match); +static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev, + const struct of_device_id *dev_id) +{ + u32 core_id; + const char *compatstr_dp500 = "arm,mali-dp500"; + bool is_dp500; + bool dt_is_dp500; + + /* + * The DP500 CORE_ID register is in a different 
location, so check it + * first. If the product id field matches, then this is DP500, otherwise + * check the DP550/650 CORE_ID register. + */ + core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID); + /* Offset 0x18 will never read 0x500 on products other than DP500. */ + is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500); + dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500, + sizeof(dev_id->compatible)) != NULL; + if (is_dp500 != dt_is_dp500) { + DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n", + dev_id->compatible, is_dp500 ? "is" : "is not"); + return false; + } else if (!dt_is_dp500) { + u16 product_id; + char buf[32]; + + core_id = malidp_hw_read(hwdev, + MALIDP550_DC_BASE + MALIDP_DE_CORE_ID); + product_id = MALIDP_PRODUCT_ID(core_id); + snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id); + if (!strnstr(dev_id->compatible, buf, + sizeof(dev_id->compatible))) { + DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n", + dev_id->compatible, product_id); + return false; + } + } + return true; +} + +static bool malidp_has_sufficient_address_space(const struct resource *res, + const struct of_device_id *dev_id) +{ + resource_size_t res_size = resource_size(res); + const char *compatstr_dp500 = "arm,mali-dp500"; + + if (!strnstr(dev_id->compatible, compatstr_dp500, + sizeof(dev_id->compatible))) + return res_size >= MALIDP550_ADDR_SPACE_SIZE; + else if (res_size < MALIDP500_ADDR_SPACE_SIZE) + return false; + return true; +} + #define MAX_OUTPUT_CHANNELS 3 static int malidp_bind(struct device *dev) @@ -266,6 +319,7 @@ static int malidp_bind(struct device *dev) struct malidp_drm *malidp; struct malidp_hw_device *hwdev; struct platform_device *pdev = to_platform_device(dev); + struct of_device_id const *dev_id; /* number of lines for the R, G and B output */ u8 output_width[MAX_OUTPUT_CHANNELS]; int ret = 0, i; @@ -286,7 +340,6 @@ static int malidp_bind(struct device *dev) memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); malidp->dev = hwdev; - INIT_LIST_HEAD(&malidp->event_list); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwdev->regs = devm_ioremap_resource(dev, res); @@ -329,6 +382,23 @@ static int malidp_bind(struct device *dev) clk_prepare_enable(hwdev->aclk); clk_prepare_enable(hwdev->mclk); + dev_id = of_match_device(malidp_drm_of_match, dev); + if (!dev_id) { + ret = -EINVAL; + goto query_hw_fail; + } + + if (!malidp_has_sufficient_address_space(res, dev_id)) { + DRM_ERROR("Insufficient address space in device-tree.\n"); + ret = -EINVAL; + goto query_hw_fail; + } + + if (!malidp_is_compatible_hw_id(hwdev, dev_id)) { + ret = -EINVAL; + goto query_hw_fail; + } + ret = hwdev->query_hw(hwdev); if (ret) { DRM_ERROR("Invalid HW configuration\n"); diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index 9fc8a2e405e4..dbc617c6e4ef 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -15,12 +15,12 @@ #include <linux/mutex.h> #include <linux/wait.h> +#include <drm/drmP.h> #include "malidp_hw.h" struct malidp_drm { struct malidp_hw_device *dev; struct drm_fbdev_cma *fbdev; - struct list_head event_list; struct drm_crtc crtc; wait_queue_head_t wq; atomic_t config_valid; diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 4bdf531f7844..488aedf5b58d 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -21,7 +21,7 @@ #include "malidp_drv.h" #include "malidp_hw.h" -static const struct 
malidp_input_format malidp500_de_formats[] = { +static const struct malidp_format_id malidp500_de_formats[] = { /* fourcc, layers supporting the format, internal id */ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 }, { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 }, @@ -69,21 +69,21 @@ static const struct malidp_input_format malidp500_de_formats[] = { { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) } -static const struct malidp_input_format malidp550_de_formats[] = { +static const struct malidp_format_id malidp550_de_formats[] = { MALIDP_COMMON_FORMATS, }; static const struct malidp_layer malidp500_layers[] = { - { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE }, - { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE }, - { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE }, + { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, + { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE }, + { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE }, }; static const struct malidp_layer malidp550_layers[] = { - { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE }, - { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE }, - { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE }, - { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE }, + { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, + { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, + { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, + { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, }; #define MALIDP_DE_DEFAULT_PREFETCH_START 5 @@ -436,8 +436,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .irq_mask = MALIDP500_DE_IRQ_CONF_VALID, .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID, }, - .input_formats = malidp500_de_formats, - .n_input_formats = ARRAY_SIZE(malidp500_de_formats), + .pixel_formats = malidp500_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp500_de_formats), .bus_align_bytes = 8, }, .query_hw = malidp500_query_hw, @@ -447,6 +447,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp500_set_config_valid, .modeset = malidp500_modeset, .rotmem_required = malidp500_rotmem_required, + .features = MALIDP_DEVICE_LV_HAS_3_STRIDES, }, [MALIDP_550] = { .map = { @@ -469,8 +470,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, }, - .input_formats = malidp550_de_formats, - .n_input_formats = ARRAY_SIZE(malidp550_de_formats), + .pixel_formats = malidp550_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats), .bus_align_bytes = 8, }, .query_hw = malidp550_query_hw, @@ -480,6 +481,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, .rotmem_required = malidp550_rotmem_required, + .features = 0, }, [MALIDP_650] = { .map = { @@ -503,8 +505,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .irq_mask = MALIDP550_DC_IRQ_CONF_VALID, .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID, }, - .input_formats = malidp550_de_formats, - .n_input_formats = 
ARRAY_SIZE(malidp550_de_formats), + .pixel_formats = malidp550_de_formats, + .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats), .bus_align_bytes = 16, }, .query_hw = malidp650_query_hw, @@ -514,6 +516,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, .rotmem_required = malidp550_rotmem_required, + .features = 0, }, }; @@ -522,10 +525,10 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, { unsigned int i; - for (i = 0; i < map->n_input_formats; i++) { - if (((map->input_formats[i].layer & layer_id) == layer_id) && - (map->input_formats[i].format == format)) - return map->input_formats[i].id; + for (i = 0; i < map->n_pixel_formats; i++) { + if (((map->pixel_formats[i].layer & layer_id) == layer_id) && + (map->pixel_formats[i].format == format)) + return map->pixel_formats[i].id; } return MALIDP_INVALID_FORMAT_ID; diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 087e1202db3d..00974b59407d 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -35,7 +35,7 @@ enum { DE_SMART = BIT(4), }; -struct malidp_input_format { +struct malidp_format_id { u32 format; /* DRM fourcc */ u8 layer; /* bitmask of layers supporting it */ u8 id; /* used internally */ @@ -58,6 +58,7 @@ struct malidp_layer { u16 id; /* layer ID */ u16 base; /* address offset for the register bank */ u16 ptr; /* address offset for the pointer register */ + u16 stride_offset; /* Offset to the first stride register. */ }; /* regmap features */ @@ -85,14 +86,18 @@ struct malidp_hw_regmap { const struct malidp_irq_map se_irq_map; const struct malidp_irq_map dc_irq_map; - /* list of supported input formats for each layer */ - const struct malidp_input_format *input_formats; - const u8 n_input_formats; + /* list of supported pixel formats for each layer */ + const struct malidp_format_id *pixel_formats; + const u8 n_pixel_formats; /* pitch alignment requirement in bytes */ const u8 bus_align_bytes; }; +/* device features */ +/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. 
*/ +#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0) + struct malidp_hw_device { const struct malidp_hw_regmap map; void __iomem *regs; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index eff2fe47e26a..414aada10fe5 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -11,6 +11,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -36,7 +37,6 @@ #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) #define MALIDP_LAYER_COMP_SIZE 0x010 #define MALIDP_LAYER_OFFSET 0x014 -#define MALIDP_LAYER_STRIDE 0x018 /* * This 4-entry look-up-table is used to determine the full 8-bit alpha value @@ -67,13 +67,14 @@ drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); - if (state) { - m_state = to_malidp_plane_state(plane->state); - __drm_atomic_helper_plane_duplicate_state(plane, &state->base); - state->rotmem_size = m_state->rotmem_size; - state->format = m_state->format; - state->n_planes = m_state->n_planes; - } + if (!state) + return NULL; + + m_state = to_malidp_plane_state(plane->state); + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + state->rotmem_size = m_state->rotmem_size; + state->format = m_state->format; + state->n_planes = m_state->n_planes; return &state->base; } @@ -102,8 +103,10 @@ static int malidp_de_plane_check(struct drm_plane *plane, { struct malidp_plane *mp = to_malidp_plane(plane); struct malidp_plane_state *ms = to_malidp_plane_state(state); + struct drm_crtc_state *crtc_state; struct drm_framebuffer *fb; - int i; + struct drm_rect clip = { 0 }; + int i, ret; u32 src_w, src_h; if (!state->crtc || !state->fb) @@ -131,8 +134,17 @@ static int malidp_de_plane_check(struct drm_plane *plane, if ((state->crtc_w > mp->hwdev->max_line_size) || (state->crtc_h > mp->hwdev->max_line_size) || (state->crtc_w < mp->hwdev->min_line_size) || - (state->crtc_h < mp->hwdev->min_line_size) || - (state->crtc_w != src_w) || (state->crtc_h != src_h)) + (state->crtc_h < mp->hwdev->min_line_size)) + return -EINVAL; + + /* + * DP550/650 video layers can accept 3 plane formats only if + * fb->pitches[1] == fb->pitches[2] since they don't have a + * third plane stride register. + */ + if (ms->n_planes == 3 && + !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && + (state->fb->pitches[1] != state->fb->pitches[2])) return -EINVAL; /* packed RGB888 / BGR888 can't be rotated or flipped */ @@ -141,6 +153,16 @@ static int malidp_de_plane_check(struct drm_plane *plane, fb->format->format == DRM_FORMAT_BGR888)) return -EINVAL; + crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + ret = drm_plane_helper_check_state(state, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); + if (ret) + return ret; + ms->rotmem_size = 0; if (state->rotation & MALIDP_ROTATED_MASK) { int val; @@ -157,6 +179,25 @@ static int malidp_de_plane_check(struct drm_plane *plane, return 0; } +static void malidp_de_set_plane_pitches(struct malidp_plane *mp, + int num_planes, unsigned int pitches[3]) +{ + int i; + int num_strides = num_planes; + + if (!mp->layer->stride_offset) + return; + + if (num_planes == 3) + num_strides = (mp->hwdev->features & + MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 
3 : 2; + + for (i = 0; i < num_strides; ++i) + malidp_hw_write(mp->hwdev, pitches[i], + mp->layer->base + + mp->layer->stride_offset + i * 4); +} + static void malidp_de_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -174,13 +215,8 @@ static void malidp_de_plane_update(struct drm_plane *plane, /* convert src values from Q16 fixed point to integer */ src_w = plane->state->src_w >> 16; src_h = plane->state->src_h >> 16; - if (plane->state->rotation & MALIDP_ROTATED_MASK) { - dest_w = plane->state->crtc_h; - dest_h = plane->state->crtc_w; - } else { - dest_w = plane->state->crtc_w; - dest_h = plane->state->crtc_h; - } + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; malidp_hw_write(mp->hwdev, ms->format, mp->layer->base); @@ -189,11 +225,12 @@ static void malidp_de_plane_update(struct drm_plane *plane, ptr = mp->layer->ptr + (i << 4); obj = drm_fb_cma_get_gem_obj(plane->state->fb, i); + obj->paddr += plane->state->fb->offsets[i]; malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr); malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4); - malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i], - mp->layer->base + MALIDP_LAYER_STRIDE); } + malidp_de_set_plane_pitches(mp, ms->n_planes, + plane->state->fb->pitches); malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), mp->layer->base + MALIDP_LAYER_SIZE); @@ -211,11 +248,12 @@ static void malidp_de_plane_update(struct drm_plane *plane, /* setup the rotation and axis flip bits */ if (plane->state->rotation & DRM_ROTATE_MASK) - val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET; + val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) << + LAYER_ROT_OFFSET; if (plane->state->rotation & DRM_REFLECT_X) - val |= LAYER_V_FLIP; - if (plane->state->rotation & DRM_REFLECT_Y) val |= LAYER_H_FLIP; + if (plane->state->rotation & DRM_REFLECT_Y) + val |= LAYER_V_FLIP; /* * always enable pixel alpha blending until we have a way to change @@ -258,7 +296,7 @@ int malidp_de_planes_init(struct drm_device *drm) u32 *formats; int ret, i, j, n; - formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL); + formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL); if (!formats) { ret = -ENOMEM; goto cleanup; @@ -274,9 +312,9 @@ int malidp_de_planes_init(struct drm_device *drm) } /* build the list of DRM supported formats based on the map */ - for (n = 0, j = 0; j < map->n_input_formats; j++) { - if ((map->input_formats[j].layer & id) == id) - formats[n++] = map->input_formats[j].format; + for (n = 0, j = 0; j < map->n_pixel_formats; j++) { + if ((map->pixel_formats[j].layer & id) == id) + formats[n++] = map->pixel_formats[j].format; } plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 73fecb38f955..aff6d4a84e99 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -81,6 +81,10 @@ #define MALIDP_DE_SYNC_WIDTH 0x8 #define MALIDP_DE_HV_ACTIVE 0xc +/* Stride register offsets relative to Lx_BASE */ +#define MALIDP_DE_LG_STRIDE 0x18 +#define MALIDP_DE_LV_STRIDE0 0x18 + /* macros to set values into registers */ #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) #define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16) @@ -92,7 +96,10 @@ #define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0) #define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16) +#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16) + /* register offsets and bits specific to DP500 */ +#define MALIDP500_ADDR_SPACE_SIZE 0x01000 #define MALIDP500_DC_BASE 0x00000 #define MALIDP500_DC_CONTROL 0x0000c #define MALIDP500_DC_CONFIG_REQ (1 << 17) @@ -125,6 +132,7 @@ #define MALIDP500_CONFIG_ID 0x00fd4 /* register offsets and bits specific to DP550/DP650 */ +#define MALIDP550_ADDR_SPACE_SIZE 0x10000 #define MALIDP550_DE_CONTROL 0x00010 #define MALIDP550_DE_LINE_COUNTER 0x00014 #define MALIDP550_DE_AXI_CONTROL 0x00018 diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig index 15f3ecfb16f1..eafaeeb7b5b1 100644 --- a/drivers/gpu/drm/armada/Kconfig +++ b/drivers/gpu/drm/armada/Kconfig @@ -1,6 +1,6 @@ config DRM_ARMADA tristate "DRM support for Marvell Armada SoCs" - depends on DRM && HAVE_CLK && ARM + depends on DRM && HAVE_CLK && ARM && MMU select DRM_KMS_HELPER help Support the "LCD" controllers found on the Marvell Armada 510 diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 2a1368fac1d1..50c910efa13d 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -236,8 +236,6 @@ struct ttm_bo_driver ast_bo_driver = { .verify_access = ast_bo_verify_access, .io_mem_reserve = &ast_ttm_io_mem_reserve, .io_mem_free = &ast_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int ast_mm_init(struct ast_private *ast) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index cbd0070265c9..0bf32d6ac39b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -431,15 +431,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev) { struct atmel_hlcdc_dc *dc = dev->dev_private; - if (dc->fbdev) { + if (dc->fbdev) drm_fbdev_cma_hotplug_event(dc->fbdev); - } else { - dc->fbdev = drm_fbdev_cma_init(dev, 24, - dev->mode_config.num_crtc, - dev->mode_config.num_connector); - if (IS_ERR(dc->fbdev)) - dc->fbdev = NULL; - } } struct atmel_hlcdc_dc_commit { @@ -653,10 +646,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) platform_set_drvdata(pdev, dev); - drm_kms_helper_poll_init(dev); + dc->fbdev = drm_fbdev_cma_init(dev, 24, + dev->mode_config.num_crtc, + dev->mode_config.num_connector); + if (IS_ERR(dc->fbdev)) + dc->fbdev = NULL; - /* force connectors detection */ - drm_helper_hpd_irq_event(dev); + drm_kms_helper_poll_init(dev); return 0; diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index ceb1fecf02dd..857755ac2d70 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -205,8 +205,6 @@ struct ttm_bo_driver bochs_bo_driver = { .verify_access = 
bochs_bo_verify_access, .io_mem_reserve = &bochs_ttm_io_mem_reserve, .io_mem_free = &bochs_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int bochs_mm_init(struct bochs_device *bochs) diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 992d76ce02bb..fe18a5d2d84b 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -12,6 +12,7 @@ #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_mipi_dsi.h> @@ -317,6 +318,8 @@ struct adv7511 { bool edid_read; wait_queue_head_t wq; + struct work_struct hpd_work; + struct drm_bridge bridge; struct drm_connector connector; @@ -329,6 +332,9 @@ struct adv7511 { struct gpio_desc *gpio_pd; + struct regulator_bulk_data *supplies; + unsigned int num_supplies; + /* ADV7533 DSI RX related params */ struct device_node *host_node; struct mipi_dsi_device *dsi; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 8dba729f6ef9..f75ab6278113 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -325,7 +325,7 @@ static void adv7511_set_link_config(struct adv7511 *adv7511, adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB; } -static void adv7511_power_on(struct adv7511 *adv7511) +static void __adv7511_power_on(struct adv7511 *adv7511) { adv7511->current_edid_segment = -1; @@ -338,7 +338,7 @@ static void adv7511_power_on(struct adv7511 *adv7511) * Still, let's be safe and stick to the documentation. */ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), - ADV7511_INT0_EDID_READY); + ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD); regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), ADV7511_INT1_DDC_ERROR); } @@ -354,6 +354,11 @@ static void adv7511_power_on(struct adv7511 *adv7511) regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, ADV7511_REG_POWER2_HPD_SRC_MASK, ADV7511_REG_POWER2_HPD_SRC_NONE); +} + +static void adv7511_power_on(struct adv7511 *adv7511) +{ + __adv7511_power_on(adv7511); /* * Most of the registers are reset during power down or when HPD is low. 
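The adv7511 hunks above split each power helper into a register-level half (the double-underscore variant) and a public wrapper that additionally tracks driver state and the chip-specific extras, so transient users such as the EDID read path can power the chip up and down without side effects. A minimal compilable sketch of that wrapper-split pattern follows; it is plain C with a stub structure standing in for the real driver context, not the driver's literal code:

#include <stdbool.h>

struct hypothetical_encoder {
	bool powered;
};

/* Register-level half: hardware writes only, no driver state. */
static void __encoder_power_on(struct hypothetical_encoder *enc)
{
	/* register writes that wake the chip would go here */
	(void)enc;
}

/* Public half: calls the low-level helper, then tracks logical state. */
static void encoder_power_on(struct hypothetical_encoder *enc)
{
	__encoder_power_on(enc);
	enc->powered = true;	/* chip-specific extras (e.g. DSI) also belong here */
}

int main(void)
{
	struct hypothetical_encoder enc = { .powered = false };

	encoder_power_on(&enc);		/* normal path: logical state updated */
	__encoder_power_on(&enc);	/* transient path (EDID read): state untouched */
	return enc.powered ? 0 : 1;
}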
@@ -362,21 +367,23 @@ static void adv7511_power_on(struct adv7511 *adv7511) if (adv7511->type == ADV7533) adv7533_dsi_power_on(adv7511); - adv7511->powered = true; } -static void adv7511_power_off(struct adv7511 *adv7511) +static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); regcache_mark_dirty(adv7511->regmap); +} +static void adv7511_power_off(struct adv7511 *adv7511) +{ + __adv7511_power_off(adv7511); if (adv7511->type == ADV7533) adv7533_dsi_power_off(adv7511); - adv7511->powered = false; } @@ -402,6 +409,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511) return false; } +static void adv7511_hpd_work(struct work_struct *work) +{ + struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work); + enum drm_connector_status status; + unsigned int val; + int ret; + + ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val); + if (ret < 0) + status = connector_status_disconnected; + else if (val & ADV7511_STATUS_HPD) + status = connector_status_connected; + else + status = connector_status_disconnected; + + if (adv7511->connector.status != status) { + adv7511->connector.status = status; + drm_kms_helper_hotplug_event(adv7511->connector.dev); + } +} + static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) { unsigned int irq0, irq1; @@ -419,7 +447,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd) regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder) - drm_helper_hpd_irq_event(adv7511->connector.dev); + schedule_work(&adv7511->hpd_work); if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { adv7511->edid_read = true; @@ -546,23 +574,20 @@ static int adv7511_get_modes(struct adv7511 *adv7511, /* Reading the EDID only works if the device is powered */ if (!adv7511->powered) { - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, 0); - if (adv7511->i2c_main->irq) { - regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), - ADV7511_INT1_DDC_ERROR); - } - adv7511->current_edid_segment = -1; + unsigned int edid_i2c_addr = + (adv7511->i2c_main->addr << 1) + 4; + + __adv7511_power_on(adv7511); + + /* Reset the EDID_I2C_ADDR register as it might be cleared */ + regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, + edid_i2c_addr); } edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511); if (!adv7511->powered) - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, - ADV7511_POWER_POWER_DOWN); + __adv7511_power_off(adv7511); kfree(adv7511->edid); adv7511->edid = edid; @@ -825,6 +850,10 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) if (adv->type == ADV7533) ret = adv7533_attach_dsi(adv); + if (adv->i2c_main->irq) + regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_HPD); + return ret; } @@ -839,6 +868,58 @@ static struct drm_bridge_funcs adv7511_bridge_funcs = { * Probe & remove */ +static const char * const adv7511_supply_names[] = { + "avdd", + "dvdd", + "pvdd", + "bgvdd", + "dvdd-3v", +}; + +static const char * const adv7533_supply_names[] = { + "avdd", + "dvdd", + "pvdd", + "a2vdd", + "v3p3", + "v1p2", +}; + +static int adv7511_init_regulators(struct adv7511 *adv) +{ + struct device *dev = 
&adv->i2c_main->dev; + const char * const *supply_names; + unsigned int i; + int ret; + + if (adv->type == ADV7511) { + supply_names = adv7511_supply_names; + adv->num_supplies = ARRAY_SIZE(adv7511_supply_names); + } else { + supply_names = adv7533_supply_names; + adv->num_supplies = ARRAY_SIZE(adv7533_supply_names); + } + + adv->supplies = devm_kcalloc(dev, adv->num_supplies, + sizeof(*adv->supplies), GFP_KERNEL); + if (!adv->supplies) + return -ENOMEM; + + for (i = 0; i < adv->num_supplies; i++) + adv->supplies[i].supply = supply_names[i]; + + ret = devm_regulator_bulk_get(dev, adv->num_supplies, adv->supplies); + if (ret) + return ret; + + return regulator_bulk_enable(adv->num_supplies, adv->supplies); +} + +static void adv7511_uninit_regulators(struct adv7511 *adv) +{ + regulator_bulk_disable(adv->num_supplies, adv->supplies); +} + static int adv7511_parse_dt(struct device_node *np, struct adv7511_link_config *config) { @@ -939,6 +1020,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (!adv7511) return -ENOMEM; + adv7511->i2c_main = i2c; adv7511->powered = false; adv7511->status = connector_status_disconnected; @@ -956,13 +1038,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (ret) return ret; + ret = adv7511_init_regulators(adv7511); + if (ret) { + dev_err(dev, "failed to init regulators\n"); + return ret; + } + /* * The power down GPIO is optional. If present, toggle it from active to * inactive to wake up the encoder. */ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH); - if (IS_ERR(adv7511->gpio_pd)) - return PTR_ERR(adv7511->gpio_pd); + if (IS_ERR(adv7511->gpio_pd)) { + ret = PTR_ERR(adv7511->gpio_pd); + goto uninit_regulators; + } if (adv7511->gpio_pd) { mdelay(5); @@ -970,12 +1060,14 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) } adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config); - if (IS_ERR(adv7511->regmap)) - return PTR_ERR(adv7511->regmap); + if (IS_ERR(adv7511->regmap)) { + ret = PTR_ERR(adv7511->regmap); + goto uninit_regulators; + } ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val); if (ret) - return ret; + goto uninit_regulators; dev_dbg(dev, "Rev. 
%d\n", val); if (adv7511->type == ADV7511) @@ -985,7 +1077,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) else ret = adv7533_patch_registers(adv7511); if (ret) - return ret; + goto uninit_regulators; regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr); regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR, @@ -995,10 +1087,11 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511_packet_disable(adv7511, 0xffff); - adv7511->i2c_main = i2c; adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1); - if (!adv7511->i2c_edid) - return -ENOMEM; + if (!adv7511->i2c_edid) { + ret = -ENOMEM; + goto uninit_regulators; + } if (adv7511->type == ADV7533) { ret = adv7533_init_cec(adv7511); @@ -1006,6 +1099,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) goto err_i2c_unregister_edid; } + INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); + if (i2c->irq) { init_waitqueue_head(&adv7511->wq); @@ -1045,6 +1140,8 @@ err_unregister_cec: adv7533_uninit_cec(adv7511); err_i2c_unregister_edid: i2c_unregister_device(adv7511->i2c_edid); +uninit_regulators: + adv7511_uninit_regulators(adv7511); return ret; } @@ -1058,6 +1155,8 @@ static int adv7511_remove(struct i2c_client *i2c) adv7533_uninit_cec(adv7511); } + adv7511_uninit_regulators(adv7511); + drm_bridge_remove(&adv7511->bridge); adv7511_audio_exit(adv7511); diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 02b97bf64ee4..e7cd1056ff2d 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1385,6 +1385,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, pm_runtime_enable(dev); + pm_runtime_get_sync(dev); phy_power_on(dp->phy); analogix_dp_init_dp(dp); @@ -1417,9 +1418,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, goto err_disable_pm_runtime; } + phy_power_off(dp->phy); + pm_runtime_put(dev); + return 0; err_disable_pm_runtime: + + phy_power_off(dp->phy); + pm_runtime_put(dev); pm_runtime_disable(dev); return ret; diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index f5009ae39b89..9a9ec27d9e28 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -113,13 +113,20 @@ struct dw_hdmi_i2c { bool is_regaddr; }; +struct dw_hdmi_phy_data { + enum dw_hdmi_phy_type type; + const char *name; + bool has_svsret; +}; + struct dw_hdmi { struct drm_connector connector; - struct drm_encoder *encoder; - struct drm_bridge *bridge; + struct drm_bridge bridge; - struct platform_device *audio; enum dw_hdmi_devtype dev_type; + unsigned int version; + + struct platform_device *audio; struct device *dev; struct clk *isfr_clk; struct clk *iahb_clk; @@ -133,7 +140,9 @@ struct dw_hdmi { u8 edid[HDMI_EDID_LEN]; bool cable_plugin; + const struct dw_hdmi_phy_data *phy; bool phy_enabled; + struct drm_display_mode previous_mode; struct i2c_adapter *ddc; @@ -868,7 +877,7 @@ static bool hdmi_phy_wait_i2c_done(struct dw_hdmi *hdmi, int msec) return true; } -static void __hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, +static void hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, unsigned char addr) { hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); @@ -882,13 +891,6 @@ static void __hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, hdmi_phy_wait_i2c_done(hdmi, 
1000); } -static int hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, - unsigned char addr) -{ - __hdmi_phy_i2c_write(hdmi, data, addr); - return 0; -} - static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable) { hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0, @@ -903,11 +905,11 @@ static void dw_hdmi_phy_enable_tmds(struct dw_hdmi *hdmi, u8 enable) HDMI_PHY_CONF0_ENTMDS_MASK); } -static void dw_hdmi_phy_enable_spare(struct dw_hdmi *hdmi, u8 enable) +static void dw_hdmi_phy_enable_svsret(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, - HDMI_PHY_CONF0_SPARECTRL_OFFSET, - HDMI_PHY_CONF0_SPARECTRL_MASK); + HDMI_PHY_CONF0_SVSRET_OFFSET, + HDMI_PHY_CONF0_SVSRET_MASK); } static void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable) @@ -938,34 +940,14 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable) HDMI_PHY_CONF0_SELDIPIF_MASK); } -static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, - unsigned char res, int cscon) +static int hdmi_phy_configure(struct dw_hdmi *hdmi, int cscon) { - unsigned res_idx; u8 val, msec; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg; const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr; const struct dw_hdmi_phy_config *phy_config = pdata->phy_config; - if (prep) - return -EINVAL; - - switch (res) { - case 0: /* color resolution 0 is 8 bit colour depth */ - case 8: - res_idx = DW_HDMI_RES_8; - break; - case 10: - res_idx = DW_HDMI_RES_10; - break; - case 12: - res_idx = DW_HDMI_RES_12; - break; - default: - return -EINVAL; - } - /* PLL/MPLL Cfg - always match on final entry */ for (; mpll_config->mpixelclock != ~0UL; mpll_config++) if (hdmi->hdmi_data.video_mode.mpixelclock <= @@ -1004,9 +986,13 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, /* gen2 pddq */ dw_hdmi_phy_gen2_pddq(hdmi, 1); - /* PHY reset */ - hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ); - hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ); + /* Leave low power consumption mode by asserting SVSRET. */ + if (hdmi->phy->has_svsret) + dw_hdmi_phy_enable_svsret(hdmi, 1); + + /* PHY reset. The reset signal is active high on Gen2 PHYs. 
*/ + hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); + hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST); @@ -1015,21 +1001,26 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, HDMI_PHY_I2CM_SLAVE_ADDR); hdmi_phy_test_clear(hdmi, 0); - hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06); - hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15); - - /* CURRCTRL */ - hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10); + hdmi_phy_i2c_write(hdmi, mpll_config->res[0].cpce, + HDMI_3D_TX_PHY_CPCE_CTRL); + hdmi_phy_i2c_write(hdmi, mpll_config->res[0].gmp, + HDMI_3D_TX_PHY_GMPCTRL); + hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0], + HDMI_3D_TX_PHY_CURRCTRL); - hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */ - hdmi_phy_i2c_write(hdmi, 0x0006, 0x17); + hdmi_phy_i2c_write(hdmi, 0, HDMI_3D_TX_PHY_PLLPHBYCTRL); + hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_FB_CLK, + HDMI_3D_TX_PHY_MSM_CTRL); - hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19); /* TXTERM */ - hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */ - hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */ + hdmi_phy_i2c_write(hdmi, phy_config->term, HDMI_3D_TX_PHY_TXTERM); + hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, + HDMI_3D_TX_PHY_CKSYMTXCTRL); + hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, + HDMI_3D_TX_PHY_VLEVCTRL); - /* REMOVE CLK TERM */ - hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */ + /* Override and disable clock termination. */ + hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE, + HDMI_3D_TX_PHY_CKCALCTRL); dw_hdmi_phy_enable_powerdown(hdmi, false); @@ -1041,10 +1032,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep, dw_hdmi_phy_gen2_txpwron(hdmi, 1); dw_hdmi_phy_gen2_pddq(hdmi, 0); - if (hdmi->dev_type == RK3288_HDMI) - dw_hdmi_phy_enable_spare(hdmi, 1); - - /*Wait for PHY PLL lock */ + /* Wait for PHY PLL lock */ msec = 5; do { val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK; @@ -1079,7 +1067,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi) dw_hdmi_phy_enable_powerdown(hdmi, true); /* Enable CSC */ - ret = hdmi_phy_configure(hdmi, 0, 8, cscon); + ret = hdmi_phy_configure(hdmi, cscon); if (ret) return ret; } @@ -1351,19 +1339,38 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi) /* Workaround to clear the overflow condition */ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) { - int count; + unsigned int count; + unsigned int i; u8 val; - /* TMDS software reset */ - hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); + /* + * Under some circumstances the Frame Composer arithmetic unit can miss + * an FC register write due to being busy processing the previous one. + * The issue can be worked around by issuing a TMDS software reset and + * then writing one of the FC registers several times. + * + * The number of iterations matters and depends on the HDMI TX revision + * (and possibly on the platform). So far only i.MX6Q (v1.30a) and + * i.MX6DL (v1.31a) have been identified as needing the workaround, with + * 4 and 1 iterations respectively. 
+ */ - val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF); - if (hdmi->dev_type == IMX6DL_HDMI) { - hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF); + switch (hdmi->version) { + case 0x130a: + count = 4; + break; + case 0x131a: + count = 1; + break; + default: return; } - for (count = 0; count < 4; count++) + /* TMDS software reset */ + hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); + + val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF); + for (i = 0; i < count; i++) hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF); } @@ -1586,42 +1593,6 @@ static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi) hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0); } -static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, - struct drm_display_mode *orig_mode, - struct drm_display_mode *mode) -{ - struct dw_hdmi *hdmi = bridge->driver_private; - - mutex_lock(&hdmi->mutex); - - /* Store the display mode for plugin/DKMS poweron events */ - memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode)); - - mutex_unlock(&hdmi->mutex); -} - -static void dw_hdmi_bridge_disable(struct drm_bridge *bridge) -{ - struct dw_hdmi *hdmi = bridge->driver_private; - - mutex_lock(&hdmi->mutex); - hdmi->disabled = true; - dw_hdmi_update_power(hdmi); - dw_hdmi_update_phy_mask(hdmi); - mutex_unlock(&hdmi->mutex); -} - -static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) -{ - struct dw_hdmi *hdmi = bridge->driver_private; - - mutex_lock(&hdmi->mutex); - hdmi->disabled = false; - dw_hdmi_update_power(hdmi); - dw_hdmi_update_phy_mask(hdmi); - mutex_unlock(&hdmi->mutex); -} - static enum drm_connector_status dw_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -1714,7 +1685,63 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = .best_encoder = drm_atomic_helper_best_encoder, }; +static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) +{ + struct dw_hdmi *hdmi = bridge->driver_private; + struct drm_encoder *encoder = bridge->encoder; + struct drm_connector *connector = &hdmi->connector; + + connector->interlace_allowed = 1; + connector->polled = DRM_CONNECTOR_POLL_HPD; + + drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs); + + drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs, + DRM_MODE_CONNECTOR_HDMIA); + + drm_mode_connector_attach_encoder(connector, encoder); + + return 0; +} + +static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *orig_mode, + struct drm_display_mode *mode) +{ + struct dw_hdmi *hdmi = bridge->driver_private; + + mutex_lock(&hdmi->mutex); + + /* Store the display mode for plugin/DPMS poweron events */ + memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode)); + + mutex_unlock(&hdmi->mutex); +} + +static void dw_hdmi_bridge_disable(struct drm_bridge *bridge) +{ + struct dw_hdmi *hdmi = bridge->driver_private; + + mutex_lock(&hdmi->mutex); + hdmi->disabled = true; + dw_hdmi_update_power(hdmi); + dw_hdmi_update_phy_mask(hdmi); + mutex_unlock(&hdmi->mutex); +} + +static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) +{ + struct dw_hdmi *hdmi = bridge->driver_private; + + mutex_lock(&hdmi->mutex); + hdmi->disabled = false; + dw_hdmi_update_power(hdmi); + dw_hdmi_update_phy_mask(hdmi); + mutex_unlock(&hdmi->mutex); +} + static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { + .attach = dw_hdmi_bridge_attach, .enable = dw_hdmi_bridge_enable, .disable = dw_hdmi_bridge_disable, .mode_set = dw_hdmi_bridge_mode_set, @@ -1816,7 +1843,8 @@ static irqreturn_t 
dw_hdmi_irq(int irq, void *dev_id) if (intr_stat & HDMI_IH_PHY_STAT0_HPD) { dev_dbg(hdmi->dev, "EVENT=%s\n", phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout"); - drm_helper_hpd_irq_event(hdmi->bridge->dev); + if (hdmi->bridge.dev) + drm_helper_hpd_irq_event(hdmi->bridge.dev); } hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); @@ -1826,67 +1854,80 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) -{ - struct drm_encoder *encoder = hdmi->encoder; - struct drm_bridge *bridge; - int ret; - - bridge = devm_kzalloc(drm->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("Failed to allocate drm bridge\n"); - return -ENOMEM; - } - - hdmi->bridge = bridge; - bridge->driver_private = hdmi; - bridge->funcs = &dw_hdmi_bridge_funcs; - ret = drm_bridge_attach(encoder, bridge, NULL); - if (ret) { - DRM_ERROR("Failed to initialize bridge with drm\n"); - return -EINVAL; +static const struct dw_hdmi_phy_data dw_hdmi_phys[] = { + { + .type = DW_HDMI_PHY_DWC_HDMI_TX_PHY, + .name = "DWC HDMI TX PHY", + }, { + .type = DW_HDMI_PHY_DWC_MHL_PHY_HEAC, + .name = "DWC MHL PHY + HEAC PHY", + .has_svsret = true, + }, { + .type = DW_HDMI_PHY_DWC_MHL_PHY, + .name = "DWC MHL PHY", + .has_svsret = true, + }, { + .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC, + .name = "DWC HDMI 3D TX PHY + HEAC PHY", + }, { + .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY, + .name = "DWC HDMI 3D TX PHY", + }, { + .type = DW_HDMI_PHY_DWC_HDMI20_TX_PHY, + .name = "DWC HDMI 2.0 TX PHY", + .has_svsret = true, } +}; - hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; +static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi) +{ + unsigned int i; + u8 phy_type; - drm_connector_helper_add(&hdmi->connector, - &dw_hdmi_connector_helper_funcs); + phy_type = hdmi_readb(hdmi, HDMI_CONFIG2_ID); - drm_connector_init(drm, &hdmi->connector, - &dw_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); + for (i = 0; i < ARRAY_SIZE(dw_hdmi_phys); ++i) { + if (dw_hdmi_phys[i].type == phy_type) { + hdmi->phy = &dw_hdmi_phys[i]; + return 0; + } + } - drm_mode_connector_attach_encoder(&hdmi->connector, encoder); + if (phy_type == DW_HDMI_PHY_VENDOR_PHY) + dev_err(hdmi->dev, "Unsupported vendor HDMI PHY\n"); + else + dev_err(hdmi->dev, "Unsupported HDMI PHY type (%02x)\n", + phy_type); - return 0; + return -ENODEV; } -int dw_hdmi_bind(struct device *dev, struct device *master, - void *data, struct drm_encoder *encoder, - struct resource *iores, int irq, - const struct dw_hdmi_plat_data *plat_data) +static struct dw_hdmi * +__dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data) { - struct drm_device *drm = data; + struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct platform_device_info pdevinfo; struct device_node *ddc_node; struct dw_hdmi *hdmi; + struct resource *iores; + int irq; int ret; u32 val = 1; + u8 prod_id0; + u8 prod_id1; u8 config0; - u8 config1; + u8 config3; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) - return -ENOMEM; - - hdmi->connector.interlace_allowed = 1; + return ERR_PTR(-ENOMEM); hdmi->plat_data = plat_data; hdmi->dev = dev; hdmi->dev_type = plat_data->dev_type; hdmi->sample_rate = 48000; - hdmi->encoder = encoder; hdmi->disabled = true; hdmi->rxsense = true; hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE); @@ -1908,7 +1949,7 @@ int dw_hdmi_bind(struct device *dev, struct device *master, break; default: dev_err(dev, "reg-io-width must be 1 or 4\n"); - 
return -EINVAL; + return ERR_PTR(-EINVAL); } ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); @@ -1917,13 +1958,14 @@ int dw_hdmi_bind(struct device *dev, struct device *master, of_node_put(ddc_node); if (!hdmi->ddc) { dev_dbg(hdmi->dev, "failed to read ddc node\n"); - return -EPROBE_DEFER; + return ERR_PTR(-EPROBE_DEFER); } } else { dev_dbg(hdmi->dev, "no ddc property found\n"); } + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, iores); if (IS_ERR(hdmi->regs)) { ret = PTR_ERR(hdmi->regs); @@ -1957,15 +1999,36 @@ int dw_hdmi_bind(struct device *dev, struct device *master, } /* Product and revision IDs */ - dev_info(dev, - "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n", - hdmi_readb(hdmi, HDMI_DESIGN_ID), - hdmi_readb(hdmi, HDMI_REVISION_ID), - hdmi_readb(hdmi, HDMI_PRODUCT_ID0), - hdmi_readb(hdmi, HDMI_PRODUCT_ID1)); + hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) + | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); + prod_id0 = hdmi_readb(hdmi, HDMI_PRODUCT_ID0); + prod_id1 = hdmi_readb(hdmi, HDMI_PRODUCT_ID1); + + if (prod_id0 != HDMI_PRODUCT_ID0_HDMI_TX || + (prod_id1 & ~HDMI_PRODUCT_ID1_HDCP) != HDMI_PRODUCT_ID1_HDMI_TX) { + dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n", + hdmi->version, prod_id0, prod_id1); + ret = -ENODEV; + goto err_iahb; + } + + ret = dw_hdmi_detect_phy(hdmi); + if (ret < 0) + goto err_iahb; + + dev_info(dev, "Detected HDMI TX controller v%x.%03x %s HDCP (%s)\n", + hdmi->version >> 12, hdmi->version & 0xfff, + prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without", + hdmi->phy->name); initialize_hdmi_ih_mutes(hdmi); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_iahb; + } + ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq, dw_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); @@ -1995,11 +2058,13 @@ int dw_hdmi_bind(struct device *dev, struct device *master, hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, HDMI_IH_PHY_STAT0); - ret = dw_hdmi_fb_registered(hdmi); - if (ret) - goto err_iahb; + hdmi->bridge.driver_private = hdmi; + hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; +#ifdef CONFIG_OF + hdmi->bridge.of_node = pdev->dev.of_node; +#endif - ret = dw_hdmi_register(drm, hdmi); + ret = dw_hdmi_fb_registered(hdmi); if (ret) goto err_iahb; @@ -2012,9 +2077,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master, pdevinfo.id = PLATFORM_DEVID_AUTO; config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID); - config1 = hdmi_readb(hdmi, HDMI_CONFIG1_ID); + config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); - if (config1 & HDMI_CONFIG1_AHB) { + if (config3 & HDMI_CONFIG3_AHBAUDDMA) { struct dw_hdmi_audio_data audio; audio.phys = iores->start; @@ -2046,9 +2111,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master, if (hdmi->i2c) dw_hdmi_i2c_init(hdmi); - dev_set_drvdata(dev, hdmi); + platform_set_drvdata(pdev, hdmi); - return 0; + return hdmi; err_iahb: if (hdmi->i2c) { @@ -2062,14 +2127,11 @@ err_isfr: err_res: i2c_put_adapter(hdmi->ddc); - return ret; + return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(dw_hdmi_bind); -void dw_hdmi_unbind(struct device *dev, struct device *master, void *data) +static void __dw_hdmi_remove(struct dw_hdmi *hdmi) { - struct dw_hdmi *hdmi = dev_get_drvdata(dev); - if (hdmi->audio && !IS_ERR(hdmi->audio)) platform_device_unregister(hdmi->audio); @@ -2084,6 +2146,70 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data) else i2c_put_adapter(hdmi->ddc); } + +/* 
----------------------------------------------------------------------------- + * Probe/remove API, used from platforms based on the DRM bridge API. + */ +int dw_hdmi_probe(struct platform_device *pdev, + const struct dw_hdmi_plat_data *plat_data) +{ + struct dw_hdmi *hdmi; + int ret; + + hdmi = __dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + ret = drm_bridge_add(&hdmi->bridge); + if (ret < 0) { + __dw_hdmi_remove(hdmi); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dw_hdmi_probe); + +void dw_hdmi_remove(struct platform_device *pdev) +{ + struct dw_hdmi *hdmi = platform_get_drvdata(pdev); + + drm_bridge_remove(&hdmi->bridge); + + __dw_hdmi_remove(hdmi); +} +EXPORT_SYMBOL_GPL(dw_hdmi_remove); + +/* ----------------------------------------------------------------------------- + * Bind/unbind API, used from platforms based on the component framework. + */ +int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, + const struct dw_hdmi_plat_data *plat_data) +{ + struct dw_hdmi *hdmi; + int ret; + + hdmi = __dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); + + ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL); + if (ret) { + dw_hdmi_remove(pdev); + DRM_ERROR("Failed to initialize bridge with drm\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(dw_hdmi_bind); + +void dw_hdmi_unbind(struct device *dev) +{ + struct dw_hdmi *hdmi = dev_get_drvdata(dev); + + __dw_hdmi_remove(hdmi); +} EXPORT_SYMBOL_GPL(dw_hdmi_unbind); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/drivers/gpu/drm/bridge/dw-hdmi.h b/drivers/gpu/drm/bridge/dw-hdmi.h index 55135bbd0c16..325b0b8ae639 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/dw-hdmi.h @@ -545,12 +545,24 @@ #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12 enum { +/* PRODUCT_ID0 field values */ + HDMI_PRODUCT_ID0_HDMI_TX = 0xa0, + +/* PRODUCT_ID1 field values */ + HDMI_PRODUCT_ID1_HDCP = 0xc0, + HDMI_PRODUCT_ID1_HDMI_RX = 0x02, + HDMI_PRODUCT_ID1_HDMI_TX = 0x01, + /* CONFIG0_ID field values */ HDMI_CONFIG0_I2S = 0x10, /* CONFIG1_ID field values */ HDMI_CONFIG1_AHB = 0x01, +/* CONFIG3_ID field values */ + HDMI_CONFIG3_AHBAUDDMA = 0x02, + HDMI_CONFIG3_GPAUD = 0x01, + /* IH_FC_INT2 field values */ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03, HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02, @@ -847,8 +859,8 @@ enum { HDMI_PHY_CONF0_PDZ_OFFSET = 7, HDMI_PHY_CONF0_ENTMDS_MASK = 0x40, HDMI_PHY_CONF0_ENTMDS_OFFSET = 6, - HDMI_PHY_CONF0_SPARECTRL_MASK = 0x20, - HDMI_PHY_CONF0_SPARECTRL_OFFSET = 5, + HDMI_PHY_CONF0_SVSRET_MASK = 0x20, + HDMI_PHY_CONF0_SVSRET_OFFSET = 5, HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10, HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4, HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8, @@ -977,8 +989,7 @@ enum { HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0, /* MC_PHYRSTZ field values */ - HDMI_MC_PHYRSTZ_ASSERT = 0x0, - HDMI_MC_PHYRSTZ_DEASSERT = 0x1, + HDMI_MC_PHYRSTZ_PHYRSTZ = 0x01, /* MC_HEACPHY_RST field values */ HDMI_MC_HEACPHY_RST_ASSERT = 0x1, @@ -1073,4 +1084,70 @@ enum { HDMI_I2CM_CTLINT_ARB_MASK = 0x4, }; +/* + * HDMI 3D TX PHY registers + */ +#define HDMI_3D_TX_PHY_PWRCTRL 0x00 +#define HDMI_3D_TX_PHY_SERDIVCTRL 0x01 +#define HDMI_3D_TX_PHY_SERCKCTRL 0x02 +#define HDMI_3D_TX_PHY_SERCKKILLCTRL 0x03 +#define HDMI_3D_TX_PHY_TXRESCTRL 0x04 +#define HDMI_3D_TX_PHY_CKCALCTRL 0x05 +#define HDMI_3D_TX_PHY_CPCE_CTRL 0x06 +#define HDMI_3D_TX_PHY_TXCLKMEASCTRL 0x07 +#define HDMI_3D_TX_PHY_TXMEASCTRL 0x08 +#define 
HDMI_3D_TX_PHY_CKSYMTXCTRL 0x09 +#define HDMI_3D_TX_PHY_CMPSEQCTRL 0x0a +#define HDMI_3D_TX_PHY_CMPPWRCTRL 0x0b +#define HDMI_3D_TX_PHY_CMPMODECTRL 0x0c +#define HDMI_3D_TX_PHY_MEASCTRL 0x0d +#define HDMI_3D_TX_PHY_VLEVCTRL 0x0e +#define HDMI_3D_TX_PHY_D2ACTRL 0x0f +#define HDMI_3D_TX_PHY_CURRCTRL 0x10 +#define HDMI_3D_TX_PHY_DRVANACTRL 0x11 +#define HDMI_3D_TX_PHY_PLLMEASCTRL 0x12 +#define HDMI_3D_TX_PHY_PLLPHBYCTRL 0x13 +#define HDMI_3D_TX_PHY_GRP_CTRL 0x14 +#define HDMI_3D_TX_PHY_GMPCTRL 0x15 +#define HDMI_3D_TX_PHY_MPLLMEASCTRL 0x16 +#define HDMI_3D_TX_PHY_MSM_CTRL 0x17 +#define HDMI_3D_TX_PHY_SCRPB_STATUS 0x18 +#define HDMI_3D_TX_PHY_TXTERM 0x19 +#define HDMI_3D_TX_PHY_PTRPT_ENBL 0x1a +#define HDMI_3D_TX_PHY_PATTERNGEN 0x1b +#define HDMI_3D_TX_PHY_SDCAP_MODE 0x1c +#define HDMI_3D_TX_PHY_SCOPEMODE 0x1d +#define HDMI_3D_TX_PHY_DIGTXMODE 0x1e +#define HDMI_3D_TX_PHY_STR_STATUS 0x1f +#define HDMI_3D_TX_PHY_SCOPECNT0 0x20 +#define HDMI_3D_TX_PHY_SCOPECNT1 0x21 +#define HDMI_3D_TX_PHY_SCOPECNT2 0x22 +#define HDMI_3D_TX_PHY_SCOPECNTCLK 0x23 +#define HDMI_3D_TX_PHY_SCOPESAMPLE 0x24 +#define HDMI_3D_TX_PHY_SCOPECNTMSB01 0x25 +#define HDMI_3D_TX_PHY_SCOPECNTMSB2CK 0x26 + +/* HDMI_3D_TX_PHY_CKCALCTRL values */ +#define HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE BIT(15) + +/* HDMI_3D_TX_PHY_MSM_CTRL values */ +#define HDMI_3D_TX_PHY_MSM_CTRL_MPLL_PH_SEL_CK BIT(13) +#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_CLK_REF_MPLL (0 << 1) +#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_OFF (1 << 1) +#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_PCLK (2 << 1) +#define HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_FB_CLK (3 << 1) +#define HDMI_3D_TX_PHY_MSM_CTRL_SCOPE_CK_SEL BIT(0) + +/* HDMI_3D_TX_PHY_PTRPT_ENBL values */ +#define HDMI_3D_TX_PHY_PTRPT_ENBL_OVERRIDE BIT(15) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT2 BIT(8) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT1 BIT(7) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_PG_SKIP_BIT0 BIT(6) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_CK_REF_ENB BIT(5) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_RCAL_ENB BIT(4) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_TX_CLK_ALIGN_ENB BIT(3) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_TX_READY BIT(2) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_CKO_WORD_ENB BIT(1) +#define HDMI_3D_TX_PHY_PTRPT_ENBL_REFCLK_ENB BIT(0) + #endif /* __DW_HDMI_H__ */ diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index ca3809851377..fc78c90ee931 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig @@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU This is a KMS driver for emulated cirrus device in qemu. It is *NOT* intended for real cirrus devices. This requires the modesetting userspace X.org driver. + + Cirrus is obsolete, the hardware was designed in the '90s + and can't keep up with today's needs. 
More background: + https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/ + + Better alternatives are: + - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+) + - qxl (DRM_QXL, qemu -vga qxl, works best with spice) + - virtio (DRM_VIRTIO_GPU, qemu -vga virtio) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index d6da848f7c6f..f53aa8f4a143 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -236,8 +236,6 @@ struct ttm_bo_driver cirrus_bo_driver = { .verify_access = cirrus_bo_verify_access, .io_mem_reserve = &cirrus_ttm_io_mem_reserve, .io_mem_free = &cirrus_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int cirrus_mm_init(struct cirrus_device *cirrus) diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index d621c8a4cf00..c89953449e96 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -421,6 +421,8 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) head->base = head->agp_info.aper_base; return head; } +/* Only exported for i810.ko */ +EXPORT_SYMBOL(drm_agp_init); /** * drm_legacy_agp_clear - Clear AGP resource list diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6414bcf7f41b..e5b738660d66 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -195,8 +195,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_clear); * all locks. So someone else could sneak in and change the current modeset * configuration. Which means that all the state assembled in @state is no * longer an atomic update to the current state, but to some arbitrary earlier - * state. Which could break assumptions the driver's ->atomic_check likely - * relies on. + * state. Which could break assumptions the driver's + * &drm_mode_config_funcs.atomic_check likely relies on. * * Hence we must clear all cached state and completely start over, using this * function. @@ -456,11 +456,10 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc, * @property: the property to set * @val: the new property value * - * Use this instead of calling crtc->atomic_set_property directly. - * This function handles generic/core properties and calls out to - * driver's ->atomic_set_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. * * RETURNS: * Zero on success, error code on failure @@ -532,10 +531,10 @@ EXPORT_SYMBOL(drm_atomic_crtc_set_property); * @property: the property to set * @val: return location for the property value * - * This function handles generic/core properties and calls out to - * driver's ->atomic_get_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. 
* * RETURNS: * Zero on success, error code on failure @@ -716,11 +715,10 @@ EXPORT_SYMBOL(drm_atomic_get_plane_state); * @property: the property to set * @val: the new property value * - * Use this instead of calling plane->atomic_set_property directly. - * This function handles generic/core properties and calls out to - * driver's ->atomic_set_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_plane_funcs.atomic_set_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. * * RETURNS: * Zero on success, error code on failure @@ -791,10 +789,10 @@ EXPORT_SYMBOL(drm_atomic_plane_set_property); * @property: the property to set * @val: return location for the property value * - * This function handles generic/core properties and calls out to - * driver's ->atomic_get_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_plane_funcs.atomic_get_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. * * RETURNS: * Zero on success, error code on failure @@ -1057,11 +1055,10 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state); * @property: the property to set * @val: the new property value * - * Use this instead of calling connector->atomic_set_property directly. - * This function handles generic/core properties and calls out to - * driver's ->atomic_set_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_connector_funcs.atomic_set_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. * * RETURNS: * Zero on success, error code on failure @@ -1136,10 +1133,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, * @property: the property to set * @val: return location for the property value * - * This function handles generic/core properties and calls out to - * driver's ->atomic_get_property() for driver properties. To ensure - * consistent behavior you must call this function rather than the - * driver hook directly. + * This function handles generic/core properties and calls out to driver's + * &drm_connector_funcs.atomic_get_property for driver properties. To ensure + * consistent behavior you must call this function rather than the driver hook + * directly. * * RETURNS: * Zero on success, error code on failure @@ -1312,12 +1309,11 @@ EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); * implicit or explicit fencing. * * This function will not set the fence to the state if it was set - * via explicit fencing interfaces on the atomic ioctl. It will - * all drope the reference to the fence as we not storing it - * anywhere. - * - * Otherwise, if plane_state->fence is not set this function we - * just set it with the received implict fence. + * via explicit fencing interfaces on the atomic ioctl. In that case it will + * drop the reference to the fence as we are not storing it anywhere. 
+ * Otherwise, if &drm_plane_state.fence is not set, this function just sets it + * to the received implicit fence. In both cases this function consumes a + * reference for @fence. */ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, @@ -1616,7 +1612,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) EXPORT_SYMBOL(drm_atomic_commit); /** - * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit + * drm_atomic_nonblocking_commit - atomic nonblocking commit * @state: atomic configuration to check * * Note that this function can return -EDEADLK if the driver needed to acquire @@ -1731,13 +1727,6 @@ int drm_atomic_debugfs_init(struct drm_minor *minor) ARRAY_SIZE(drm_atomic_debugfs_list), minor->debugfs_root, minor); } - -int drm_atomic_debugfs_cleanup(struct drm_minor *minor) -{ - return drm_debugfs_remove_files(drm_atomic_debugfs_list, - ARRAY_SIZE(drm_atomic_debugfs_list), - minor); -} #endif /* @@ -1829,10 +1818,10 @@ static int atomic_set_prop(struct drm_atomic_state *state, * @plane_mask: plane mask for planes that were updated. * @ret: return value, can be -EDEADLK for a retry. * - * Before doing an update plane->old_fb is set to plane->fb, - * but before dropping the locks old_fb needs to be set to NULL - * and plane->fb updated. This is a common operation for each - * atomic update, so this call is split off as a helper. + * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before + * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This + * is a common operation for each atomic update, so this call is split off as a + * helper. */ void drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, @@ -1873,7 +1862,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); * As a contrast, with implicit fencing the kernel keeps track of any * ongoing rendering, and automatically ensures that the atomic update waits * for any pending rendering to complete. For shared buffers represented with - * a &struct dma_buf this is tracked in &reservation_object structures. + * a &struct dma_buf this is tracked in &struct reservation_object. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), * whereas explicit fencing is what Android wants. * diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9460f3a4c1c2..9a08445a7a7a 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -458,22 +458,25 @@ mode_fixup(struct drm_atomic_state *state) * Check the state object to see if the requested state is physically possible. * This does all the crtc and connector related computations for an atomic * update and adds any additional connectors needed for full modesets and calls - * down into ->mode_fixup functions of the driver backend. - * - * crtc_state->mode_changed is set when the input mode is changed. - * crtc_state->connectors_changed is set when a connector is added or - * removed from the crtc. - * crtc_state->active_changed is set when crtc_state->active changes, - * which is used for dpms. + * down into &drm_crtc_helper_funcs.mode_fixup and + * &drm_encoder_helper_funcs.mode_fixup or + * &drm_encoder_helper_funcs.atomic_check functions of the driver backend. + * + * &drm_crtc_state.mode_changed is set when the input mode is changed. + * &drm_crtc_state.connectors_changed is set when a connector is added or + * removed from the crtc. 
&drm_crtc_state.active_changed is set when + * &drm_crtc_state.active changes, which is used for DPMS. * See also: drm_atomic_crtc_needs_modeset() * * IMPORTANT: * - * Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a - * plane update can't be done without a full modeset) _must_ call this function - * afterwards after that change. It is permitted to call this function multiple - * times for the same update, e.g. when the ->atomic_check functions depend upon - * the adjusted dotclock for fifo space allocation and watermark computation. + * Drivers which set &drm_crtc_state.mode_changed (e.g. in their + * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done + * without a full modeset) _must_ call this function again after that + * change. It is permitted to call this function multiple times for the same + * update, e.g. when the &drm_crtc_helper_funcs.atomic_check functions depend + * upon the adjusted dotclock for fifo space allocation and watermark + * computation. * * RETURNS: * Zero for success or -errno @@ -584,9 +587,10 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * * Check the state object to see if the requested state is physically possible. * This does all the plane update related checks by calling into the - * ->atomic_check hooks provided by the driver. + * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check + * hooks provided by the driver. * - * It also sets crtc_state->planes_changed to indicate that a crtc has + * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has * updated planes. * * RETURNS: @@ -648,14 +652,15 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes); * Check the state object to see if the requested state is physically possible. * Only crtcs and planes have check callbacks, so for any additional (global) * checking that a driver needs it can simply wrap that around this function. - * Drivers without such needs can directly use this as their ->atomic_check() - * callback. + * Drivers without such needs can directly use this as their + * &drm_mode_config_funcs.atomic_check callback. * * This just wraps the two parts of the state checking for planes and modeset * state in the default order: First it calls drm_atomic_helper_check_modeset() * and then drm_atomic_helper_check_planes(). The assumption is that the - * ->atomic_check functions depend upon an updated adjusted_mode.clock to - * e.g. properly compute watermarks. + * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check + * functions depend upon an updated adjusted_mode.clock to e.g. properly compute + * watermarks. * * RETURNS: * Zero for success or -errno @@ -1125,8 +1130,8 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * drm_atomic_helper_commit_tail - commit atomic update to hardware * @old_state: atomic state object with old state structures * - * This is the default implemenation for the ->atomic_commit_tail() hook of the - * &drm_mode_config_helper_funcs vtable. + * This is the default implementation for the + * &drm_mode_config_helper_funcs.atomic_commit_tail hook. * * Note that the default ordering of how the various stages are called is to * match the legacy modeset helper library closest. One peculiarity of that is @@ -1203,8 +1208,8 @@ static void commit_work(struct work_struct *work) * drm_atomic_helper_setup_commit() and related functions. 
 * * Committing the actual hardware state is done through the - ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable, - * or it's default implementation drm_atomic_helper_commit_tail(). + * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default + * implementation drm_atomic_helper_commit_tail(). * * RETURNS: * Zero for success or -errno. @@ -1227,8 +1232,10 @@ int drm_atomic_helper_commit(struct drm_device *dev, if (!nonblock) { ret = drm_atomic_helper_wait_for_fences(dev, state, true); - if (ret) + if (ret) { + drm_atomic_helper_cleanup_planes(dev, state); return ret; + } } /* @@ -1355,7 +1362,7 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) return ret < 0 ? ret : 0; } -void release_crtc_commit(struct completion *completion) +static void release_crtc_commit(struct completion *completion) { struct drm_crtc_commit *commit = container_of(completion, typeof(*commit), @@ -1371,14 +1378,15 @@ void release_crtc_commit(struct completion *completion) * * This function prepares @state to be used by the atomic helper's support for * nonblocking commits. Drivers using the nonblocking commit infrastructure - * should always call this function from their ->atomic_commit hook. + * should always call this function from their + * &drm_mode_config_funcs.atomic_commit hook. * * To be able to use this support drivers need to use a few more helper * functions. drm_atomic_helper_wait_for_dependencies() must be called before * actually committing the hardware state, and for nonblocking commits this call * must be placed in the async worker. See also drm_atomic_helper_swap_state() * and its stall parameter, for when a driver's commit hooks look at the - * ->state pointers of &struct drm_crtc, &drm_plane or &drm_connector directly. + * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly. * * Completion of the hardware commit step must be signalled using * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed @@ -1487,8 +1495,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) * This function waits for all preceding commits that touch the same CRTC as * @old_state to both be committed to the hardware (as signalled by * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled - * by calling drm_crtc_vblank_send_event on the event member of - * &drm_crtc_state). + * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event). * * This is part of the atomic helper support for nonblocking commits, see * drm_atomic_helper_setup_commit() for an overview. @@ -1625,8 +1632,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); * @state: atomic state object with new state structures * * This function prepares plane state, specifically framebuffers, for the new - * configuration. If any failure is encountered this function will call - * ->cleanup_fb on any already successfully prepared framebuffer. + * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure + * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on + * any already successfully prepared framebuffer. * * Returns: * 0 on success, negative error code on failure. @@ -1706,10 +1714,10 @@ static bool plane_crtc_active(const struct drm_plane_state *state) * * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant * display controllers require disabling a CRTC's planes when the CRTC is - disabled. 
This function would skip the ->atomic_disable call for a plane if - * the CRTC of the old plane state needs a modesetting operation. Of course, - * the drivers need to disable the planes in their CRTC disable callbacks - * since no one else would do that. + * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable + * call for a plane if the CRTC of the old plane state needs a modesetting + * operation. Of course, the drivers need to disable the planes in their CRTC + * disable callbacks since no one else would do that. * * The drm_atomic_helper_commit() default implementation doesn't set the * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers. @@ -1872,7 +1880,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); * planes. * * It is a bug to call this function without having implemented the - * ->atomic_disable() plane hook. + * &drm_plane_helper_funcs.atomic_disable plane hook. */ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, @@ -1959,8 +1967,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); * contains the old state. Also do any other cleanup required with that state. * * @stall must be set when nonblocking commits for this driver directly access - * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the - * current atomic helpers this is almost always the case, since the helpers + * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With + * the current atomic helpers this is almost always the case, since the helpers * don't pass the right state structures to the callbacks. */ void drm_atomic_helper_swap_state(struct drm_atomic_state *state, @@ -2361,7 +2369,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, if (ret != 0) return ret; - drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); + drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay); drm_atomic_set_fb_for_plane(primary_state, set->fb); primary_state->crtc_x = 0; @@ -2706,6 +2714,44 @@ backoff: } EXPORT_SYMBOL(drm_atomic_helper_connector_set_property); +static int page_flip_common( + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_plane *plane = crtc->primary; + struct drm_plane_state *plane_state; + struct drm_crtc_state *crtc_state; + int ret = 0; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->event = event; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + + ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); + if (ret != 0) + return ret; + drm_atomic_set_fb_for_plane(plane_state, fb); + + /* Make sure we don't accidentally do a full modeset. */ + state->allow_modeset = false; + if (!crtc_state->active) { + DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n", + crtc->base.id); + return -EINVAL; + } + + return ret; +} + /** * drm_atomic_helper_page_flip - execute a legacy page flip * @crtc: DRM crtc @@ -2713,7 +2759,8 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_set_property); * @event: optional DRM event to signal upon completion * @flags: flip flags for non-vblank sync'ed updates * - * Provides a default page flip implementation using the atomic driver interface. + * Provides a default &drm_crtc_funcs.page_flip implementation + * using the atomic driver interface. 
 *
 * Note that for now so-called async page flips (i.e. updates which are not
 * synchronized to vblank) are not supported, since the atomic interfaces have
@@ -2721,6 +2768,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
+ *
+ * See also:
+ * drm_atomic_helper_page_flip_target()
 */
 int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 				struct drm_framebuffer *fb,
@@ -2729,8 +2779,6 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 {
 	struct drm_plane *plane = crtc->primary;
 	struct drm_atomic_state *state;
-	struct drm_plane_state *plane_state;
-	struct drm_crtc_state *crtc_state;
 	int ret = 0;
 
 	if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
@@ -2741,35 +2789,86 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
 retry:
-	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
+	ret = page_flip_common(state, crtc, fb, event);
+	if (ret != 0)
 		goto fail;
-	}
-	crtc_state->event = event;
 
-	plane_state = drm_atomic_get_plane_state(state, plane);
-	if (IS_ERR(plane_state)) {
-		ret = PTR_ERR(plane_state);
-		goto fail;
-	}
+	ret = drm_atomic_nonblocking_commit(state);
 
-	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+fail:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_put(state);
+	return ret;
+
+backoff:
+	drm_atomic_state_clear(state);
+	drm_atomic_legacy_backoff(state);
+
+	/*
+	 * Someone might have exchanged the framebuffer while we dropped locks
+	 * in the backoff code. We need to fix up the fb refcount tracking the
+	 * core does for us.
+	 */
+	plane->old_fb = plane->fb;
+
+	goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+
+/**
+ * drm_atomic_helper_page_flip_target - do page flip on target vblank period.
+ * @crtc: DRM crtc
+ * @fb: DRM framebuffer
+ * @event: optional DRM event to signal upon completion
+ * @flags: flip flags for non-vblank sync'ed updates
+ * @target: the target vblank period when the flip should take effect
+ *
+ * Provides a default &drm_crtc_funcs.page_flip_target implementation.
+ * Similar to drm_atomic_helper_page_flip(), but with an extra parameter
+ * specifying the target vblank period when the flip should take effect.
+ *
+ * Returns:
+ * Returns 0 on success, negative errno numbers on failure.
+ */
+int drm_atomic_helper_page_flip_target(
+				struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t flags,
+				uint32_t target)
+{
+	struct drm_plane *plane = crtc->primary;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
+		return -EINVAL;
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+	ret = page_flip_common(state, crtc, fb, event);
 	if (ret != 0)
 		goto fail;
 
-	drm_atomic_set_fb_for_plane(plane_state, fb);
-
-	/* Make sure we don't accidentally do a full modeset.
 */
-	state->allow_modeset = false;
-	if (!crtc_state->active) {
-		DRM_DEBUG_ATOMIC("[CRTC:%d] disabled, rejecting legacy flip\n",
-				 crtc->base.id);
+	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+	if (WARN_ON(!crtc_state)) {
 		ret = -EINVAL;
 		goto fail;
 	}
+	crtc_state->target_vblank = target;
 
 	ret = drm_atomic_nonblocking_commit(state);
+
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
@@ -2790,7 +2889,7 @@ backoff:
 
 	goto retry;
 }
-EXPORT_SYMBOL(drm_atomic_helper_page_flip);
+EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
 
 /**
 * drm_atomic_helper_connector_dpms() - connector dpms helper implementation
@@ -2799,8 +2898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
 *
 * This is the main helper function provided by the atomic helper framework for
 * implementing the legacy DPMS connector interface. It computes the new desired
- * ->active state for the corresponding CRTC (if the connector is enabled) and
- * updates it.
+ * &drm_crtc_state.active state for the corresponding CRTC (if the connector is
+ * enabled) and updates it.
 *
 * Returns:
 * Returns 0 on success, negative errno numbers on failure.
@@ -2872,11 +2971,11 @@ backoff:
 EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
 
 /**
- * drm_atomic_helper_best_encoder - Helper for &drm_connector_helper_funcs
- * ->best_encoder callback
+ * drm_atomic_helper_best_encoder - Helper for
+ * &drm_connector_helper_funcs.best_encoder callback
 * @connector: Connector control structure
 *
- * This is a &drm_connector_helper_funcs ->best_encoder callback helper for
+ * This is a &drm_connector_helper_funcs.best_encoder callback helper for
 * connectors that support exactly 1 encoder, statically determined at driver
 * init time.
 */
@@ -2910,7 +3009,7 @@ EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
 */
 
 /**
- * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
 * @crtc: drm CRTC
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which might
@@ -3017,7 +3116,7 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
 
 /**
- * drm_atomic_helper_plane_reset - default ->reset hook for planes
+ * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
@@ -3121,8 +3220,9 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
 * @conn_state: connector state to assign
 *
 * Initializes the newly allocated @conn_state and assigns it to
- * #connector ->state, usually required when initializing the drivers
- * or when called from the ->reset hook.
+ * the &drm_connector.state pointer of @connector, usually required when
+ * initializing the drivers or when called from the &drm_connector_funcs.reset
+ * hook.
 *
 * This is useful for drivers that subclass the connector state.
*/ @@ -3138,7 +3238,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector, EXPORT_SYMBOL(__drm_atomic_helper_connector_reset); /** - * drm_atomic_helper_connector_reset - default ->reset hook for connectors + * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors * @connector: drm connector * * Resets the atomic state for @connector by freeing the state pointer (which diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 860cfe124c2a..7ff697389d74 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -40,8 +40,8 @@ * least once successfully became the device master (either through the * SET_MASTER IOCTL, or implicitly through opening the primary device node when * no one else is the current master that time) there exists one &drm_master. - * This is noted in the is_master member of &drm_file. All other clients have - * just a pointer to the &drm_master they are associated with. + * This is noted in &drm_file.is_master. All other clients have just a pointer + * to the &drm_master they are associated with. * * In addition only one &drm_master can be the current master for a &drm_device. * It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 1f2412c7ccfd..665aafc6ad68 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -40,9 +40,8 @@ * sub-pixel accuracy, which is scaled up to a pixel-aligned destination * rectangle in the visible area of a &drm_crtc. The visible area of a CRTC is * defined by the horizontal and vertical visible pixels (stored in @hdisplay - * and @vdisplay) of the requested mode (stored in @mode in the - * &drm_crtc_state). These two rectangles are both stored in the - * &drm_plane_state. + * and @vdisplay) of the requested mode (stored in &drm_crtc_state.mode). These + * two rectangles are both stored in the &drm_plane_state. * * For the atomic ioctl the following standard (atomic) properties on the plane object * encode the basic plane composition model: @@ -215,7 +214,7 @@ EXPORT_SYMBOL(drm_rotation_simplify); * for it in drm core. Drivers can then attach this property to planes to enable * support for configurable planes arrangement during blending operation. * Once mutable zpos property has been enabled, the DRM core will automatically - * calculate drm_plane_state->normalized_zpos values. Usually min should be set + * calculate &drm_plane_state.normalized_zpos values. Usually min should be set * to 0 and max to maximal number of planes for given crtc - 1. * * If zpos of some planes cannot be changed (like fixed background or @@ -367,8 +366,8 @@ done: * For every CRTC this function checks new states of all planes assigned to * it and calculates normalized zpos value for these planes. Planes are compared * first by their zpos values, then by plane id (if zpos is equal). The plane - * with lowest zpos value is at the bottom. The plane_state->normalized_zpos is - * then filled with unique values from 0 to number of active planes in crtc + * with lowest zpos value is at the bottom. The &drm_plane_state.normalized_zpos + * is then filled with unique values from 0 to number of active planes in crtc * minus one. 
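As a brief aside, the normalization described above only kicks in once a driver attaches the standard zpos property to its planes; a minimal sketch, assuming the usual convention of min = 0 and max = number of planes minus one (the foo_ name is hypothetical):

#include <drm/drm_blend.h>

/* Hypothetical init helper: attach a mutable zpos property to @plane. */
static int foo_plane_init_zpos(struct drm_plane *plane, unsigned int zpos,
			       unsigned int num_planes)
{
	return drm_plane_create_zpos_property(plane, zpos, 0, num_planes - 1);
}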
 *
 * RETURNS
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index a7916e5f8864..c3b9aaccdf42 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -29,7 +29,9 @@
 */
 
 #include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_cache.h>
 
 #if defined(CONFIG_X86)
 #include <asm/smp.h>
@@ -67,6 +69,14 @@ static void drm_cache_flush_clflush(struct page *pages[],
 }
 #endif
 
+/**
+ * drm_clflush_pages - Flush dcache lines of a set of pages.
+ * @pages: List of pages to be flushed.
+ * @num_pages: Number of pages in the array.
+ *
+ * Flush every data cache line entry that points to an address belonging
+ * to a page in the array.
+ */
 void
 drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 {
@@ -101,6 +111,13 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 }
 EXPORT_SYMBOL(drm_clflush_pages);
 
+/**
+ * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
+ * @st: struct sg_table.
+ *
+ * Flush every data cache line entry that points to an address in the
+ * sg.
+ */
 void
 drm_clflush_sg(struct sg_table *st)
 {
@@ -125,6 +142,14 @@ drm_clflush_sg(struct sg_table *st)
 }
 EXPORT_SYMBOL(drm_clflush_sg);
 
+/**
+ * drm_clflush_virt_range - Flush dcache lines of a region
+ * @addr: Initial kernel memory address.
+ * @length: Region size.
+ *
+ * Flush every data cache line entry that points to an address in the
+ * region requested.
+ */
 void
 drm_clflush_virt_range(void *addr, unsigned long length)
 {
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 799edd0d308e..e4d2c8a49076 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -38,18 +38,17 @@
 * Hence they are reference-counted using drm_connector_reference() and
 * drm_connector_unreference().
 *
- * KMS driver must create, initialize, register and attach at a struct
- * &drm_connector for each such sink. The instance is created as other KMS
- * objects and initialized by setting the following fields.
- *
- * The connector is then registered with a call to drm_connector_init() with a
- * pointer to the connector functions and a connector type, and exposed through
- * sysfs with a call to drm_connector_register().
+ * KMS drivers must create, initialize, register and attach a &struct
+ * drm_connector for each such sink. The instance is created as other KMS
+ * objects and initialized by setting the following fields. The connector is
+ * initialized with a call to drm_connector_init() with a pointer to the
+ * &struct drm_connector_funcs and a connector type, and then exposed to
+ * userspace with a call to drm_connector_register().
 *
 * Connectors must be attached to an encoder to be used. For devices that map
 * connectors to encoders 1:1, the connector should be attached at
 * initialization time with a call to drm_mode_connector_attach_encoder(). The
- * driver must also set the &struct drm_connector encoder field to point to the
+ * driver must also set the &drm_connector.encoder field to point to the
 * attached encoder.
 *
 * For connectors which are not fixed (like built-in panels) the driver needs to
@@ -497,7 +496,7 @@ static struct lockdep_map connector_list_iter_dep_map = {
 * @dev: DRM device
 * @iter: connector_list iterator
 *
- * Sets @iter up to walk the connector list in &drm_mode_config of @dev. @iter
+ * Sets @iter up to walk the &drm_mode_config.connector_list of @dev. @iter
 * must always be cleaned up again by calling drm_connector_list_iter_put().
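The intended calling pattern, sketched for a given struct drm_device *dev (this mirrors the leak-check loop added to drm_mode_config_cleanup() later in this diff):

	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_get(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Safe against concurrent connector hotplug/removal. */
		DRM_DEBUG_KMS("connector %s\n", connector->name);
	}
	drm_connector_list_iter_put(&conn_iter);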
 * Iteration itself happens using drm_connector_list_iter_next() or
 * drm_for_each_connector_iter().
@@ -696,8 +695,8 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 * drivers this is only provided for backwards compatibility with existing
 * drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
 * connector is linked to. Drivers should never set this property directly,
- * it is handled by the DRM core by calling the ->dpms() callback in
- * &drm_connector_funcs. Atomic drivers should implement this hook using
+ * it is handled by the DRM core by calling the &drm_connector_funcs.dpms
+ * callback. Atomic drivers should implement this hook using
 * drm_atomic_helper_connector_dpms(). This is the only standard
 * connector property that userspace can change.
 * PATH:
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bd3c8b243447..6915f897bd8e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -47,6 +47,27 @@
 #include "drm_internal.h"
 
 /**
+ * DOC: overview
+ *
+ * A CRTC represents the overall display pipeline. It receives pixel data from
+ * &drm_plane and blends them together. The &drm_display_mode is also attached
+ * to the CRTC, specifying display timings. On the output side the data is fed
+ * to one or more &drm_encoder, which are then each connected to one
+ * &drm_connector.
+ *
+ * To create a CRTC, a KMS driver allocates and zeroes an instance of
+ * &struct drm_crtc (possibly as part of a larger structure) and registers it
+ * with a call to drm_crtc_init_with_planes().
+ *
+ * The CRTC is also the entry point for legacy modeset operations, see
+ * &drm_crtc_funcs.set_config, legacy plane operations, see
+ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy
+ * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these
+ * features are controlled through &drm_property and
+ * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_commit.
+ */
+
+/**
 * drm_crtc_from_index - find the registered CRTC at an index
 * @dev: DRM device
 * @idx: index of registered CRTC to find for
@@ -415,11 +436,12 @@ int drm_mode_getcrtc(struct drm_device *dev,
 }
 
 /**
- * drm_mode_set_config_internal - helper to call ->set_config
+ * drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
 * @set: modeset config to set
 *
- * This is a little helper to wrap internal calls to the ->set_config driver
- * interface. The only thing it adds is correct refcounting dance.
+ * This is a little helper to wrap internal calls to the
+ * &drm_mode_config_funcs.set_config driver interface. The only thing it adds is
+ * the correct refcounting dance.
 *
 * Returns:
 * Zero on success, negative errno on failure.
@@ -460,27 +482,6 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
 /**
- * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
- * @mode: mode to query
- * @hdisplay: hdisplay value to fill in
- * @vdisplay: vdisplay value to fill in
- *
- * The vdisplay value will be doubled if the specified mode is a stereo mode of
- * the appropriate layout.
- */
-void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
-			    int *hdisplay, int *vdisplay)
-{
-	struct drm_display_mode adjusted;
-
-	drm_mode_copy(&adjusted, mode);
-	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
-	*hdisplay = adjusted.crtc_hdisplay;
-	*vdisplay = adjusted.crtc_vdisplay;
-}
-EXPORT_SYMBOL(drm_crtc_get_hv_timing);
-
-/**
 * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
 * CRTC viewport
 * @crtc: CRTC that framebuffer will be displayed on
@@ -497,7 +498,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 {
 	int hdisplay, vdisplay;
 
-	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+	drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
 
 	if (crtc->state &&
 	    drm_rotation_90_or_270(crtc->primary->state->rotation))
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1e281dd42e4b..44ba0e990d6c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -53,9 +53,9 @@
 * configuration on resume with drm_helper_resume_force_mode().
 *
 * Note that this helper library doesn't track the current power state of CRTCs
- * and encoders. It can call callbacks like ->dpms() even though the hardware is
- * already in the desired state. This deficiency has been fixed in the atomic
- * helpers.
+ * and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even
+ * though the hardware is already in the desired state. This deficiency has been
+ * fixed in the atomic helpers.
 *
 * The driver callbacks are mostly compatible with the atomic modeset helpers,
 * except for the handling of the primary plane: Atomic helpers require that the
@@ -477,12 +477,12 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 * drm_crtc_helper_set_config - set a new config from userspace
 * @set: mode set configuration
 *
- * The drm_crtc_helper_set_config() helper function implements the set_config
- * callback of &struct drm_crtc_funcs for drivers using the legacy CRTC helpers.
+ * The drm_crtc_helper_set_config() helper function implements the
+ * &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC
+ * helpers.
 *
 * It first tries to locate the best encoder for each connector by calling the
- * connector ->best_encoder() (&struct drm_connector_helper_funcs) helper
- * operation.
+ * connector &drm_connector_helper_funcs.best_encoder helper operation.
 *
 * After locating the appropriate encoders, the helper function will call the
 * mode_fixup encoder and CRTC helper operations to adjust the requested mode,
@@ -493,8 +493,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 *
 * If the adjusted mode is identical to the current mode but changes to the
 * frame buffer need to be applied, the drm_crtc_helper_set_config() function
- * will call the CRTC ->mode_set_base() (&struct drm_crtc_helper_funcs) helper
- * operation.
+ * will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation.
 *
 * If the adjusted mode differs from the current mode, or if the
 * ->mode_set_base() helper operation is not provided, the helper function
@@ -851,14 +850,15 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 * @connector: affected connector
 * @mode: DPMS mode
 *
- * The drm_helper_connector_dpms() helper function implements the ->dpms()
- * callback of &struct drm_connector_funcs for drivers using the legacy CRTC helpers.
+ * The drm_helper_connector_dpms() helper function implements the + * &drm_connector_funcs.dpms callback for drivers using the legacy CRTC + * helpers. * * This is the main helper function provided by the CRTC helper framework for * implementing the DPMS connector attribute. It computes the new desired DPMS - * state for all encoders and CRTCs in the output mesh and calls the ->dpms() - * callbacks provided by the driver in &struct drm_crtc_helper_funcs and struct - * &drm_encoder_helper_funcs appropriately. + * state for all encoders and CRTCs in the output mesh and calls the + * &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks + * provided by the driver. * * This function is deprecated. New drivers must implement atomic modeset * support, for which this function is unsuitable. Instead drivers should use diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 724c329186d5..1bdcfd566695 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -177,7 +177,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, #ifdef CONFIG_DEBUG_FS struct drm_minor; int drm_atomic_debugfs_init(struct drm_minor *minor); -int drm_atomic_debugfs_cleanup(struct drm_minor *minor); #endif int drm_atomic_get_property(struct drm_mode_object *obj, diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 37fd612d57a6..2290a74a6e46 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -81,7 +81,8 @@ static const struct file_operations drm_debugfs_fops = { * \return Zero on success, non-zero on failure * * Create a given set of debugfs files represented by an array of - * gdm_debugfs_lists in the given root directory. + * &drm_info_list in the given root directory. These files will be removed + * automatically on drm_debugfs_cleanup(). */ int drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, struct drm_minor *minor) @@ -218,6 +219,19 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count, } EXPORT_SYMBOL(drm_debugfs_remove_files); +static void drm_debugfs_remove_all_files(struct drm_minor *minor) +{ + struct drm_info_node *node, *tmp; + + mutex_lock(&minor->debugfs_lock); + list_for_each_entry_safe(node, tmp, &minor->debugfs_list, list) { + debugfs_remove(node->dent); + list_del(&node->list); + kfree(node); + } + mutex_unlock(&minor->debugfs_lock); +} + /** * Cleanup the debugfs filesystem resources. 
* @@ -229,7 +243,6 @@ EXPORT_SYMBOL(drm_debugfs_remove_files); int drm_debugfs_cleanup(struct drm_minor *minor) { struct drm_device *dev = minor->dev; - int ret; if (!minor->debugfs_root) return 0; @@ -237,17 +250,9 @@ int drm_debugfs_cleanup(struct drm_minor *minor) if (dev->driver->debugfs_cleanup) dev->driver->debugfs_cleanup(minor); - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { - ret = drm_atomic_debugfs_cleanup(minor); - if (ret) { - DRM_ERROR("DRM: Failed to remove atomic debugfs entries\n"); - return ret; - } - } - - drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor); + drm_debugfs_remove_all_files(minor); - debugfs_remove(minor->debugfs_root); + debugfs_remove_recursive(minor->debugfs_root); minor->debugfs_root = NULL; return 0; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3e6fe82c6d64..68908c1d5ca1 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -725,7 +725,7 @@ MODULE_PARM_DESC(dp_aux_i2c_speed_khz, /* * Transfer a single I2C-over-AUX message and handle various error conditions, * retrying the transaction as appropriate. It is assumed that the - * aux->transfer function does not modify anything in the msg other than the + * &drm_dp_aux.transfer function does not modify anything in the msg other than the * reply field. * * Returns bytes transferred on success, or a negative error code on failure. diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index aa644487749c..122a1b04bebc 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1086,7 +1086,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, } static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, - struct device *dev, + struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) { struct drm_dp_mst_port *port; @@ -1104,7 +1104,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, port->port_num = port_msg->port_number; port->mgr = mstb->mgr; port->aux.name = "DPMST"; - port->aux.dev = dev; + port->aux.dev = dev->dev; created = true; } else { old_pdt = port->pdt; @@ -2949,7 +2949,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) * Return 0 for success, or negative error code on failure */ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, - struct device *dev, struct drm_dp_aux *aux, + struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 1b11ab628da7..6cbd67f4fbc5 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -221,7 +221,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type) ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); - return ret; + goto err_debugfs; } ret = device_add(minor->kdev); @@ -309,7 +309,7 @@ void drm_minor_release(struct drm_minor *minor) * userspace the device instance can be published using drm_dev_register(). * * There is also deprecated support for initalizing device instances using - * bus-specific helpers and the ->load() callback. But due to + * bus-specific helpers and the &drm_driver.load callback. 
But due to * backwards-compatibility needs the device instance have to be published too * early, which requires unpretty global locking to make safe and is therefore * only support for existing drivers not yet converted to the new scheme. @@ -598,6 +598,8 @@ static void drm_dev_release(struct kref *ref) { struct drm_device *dev = container_of(ref, struct drm_device, ref); + drm_vblank_cleanup(dev); + if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_destroy(dev); @@ -718,9 +720,9 @@ static void remove_compat_control_link(struct drm_device *dev) * Never call this twice on any device! * * NOTE: To ensure backward compatibility with existing drivers method this - * function calls the ->load() method after registering the device nodes, - * creating race conditions. Usage of the ->load() methods is therefore - * deprecated, drivers must perform all initialization before calling + * function calls the &drm_driver.load method after registering the device + * nodes, creating race conditions. Usage of the &drm_driver.load methods is + * therefore deprecated, drivers must perform all initialization before calling * drm_dev_register(). * * RETURNS: @@ -805,8 +807,6 @@ void drm_dev_unregister(struct drm_device *dev) if (dev->agp) drm_pci_agp_destroy(dev); - drm_vblank_cleanup(dev); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_legacy_rmmap(dev, r_list->map); diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index e5c61cda4ae3..10307cc16d75 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -42,8 +42,8 @@ * create dumb buffers suitable for scanout, which can then be used to create * KMS frame buffers. * - * To support dumb objects drivers must implement the dumb_create, - * dumb_destroy and dumb_map_offset operations from &struct drm_driver. See + * To support dumb objects drivers must implement the &drm_driver.dumb_create, + * &drm_driver.dumb_destroy and &drm_driver.dumb_map_offset operations. See * there for further details. * * Note that dumb objects may not be used for gpu acceleration, as has been diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4ff04aa84dd0..baa6ccb3e18b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3768,6 +3768,25 @@ bool drm_rgb_quant_range_selectable(struct edid *edid) } EXPORT_SYMBOL(drm_rgb_quant_range_selectable); +/** + * drm_default_rgb_quant_range - default RGB quantization range + * @mode: display mode + * + * Determine the default RGB quantization range for the mode, + * as specified in CEA-861. + * + * Return: The default RGB quantization range for the mode + */ +enum hdmi_quantization_range +drm_default_rgb_quant_range(const struct drm_display_mode *mode) +{ + /* All CEA modes other than VIC 1 use limited quantization range. */ + return drm_match_cea_mode(mode) > 1 ? 
+		HDMI_QUANTIZATION_RANGE_LIMITED :
+		HDMI_QUANTIZATION_RANGE_FULL;
+}
+EXPORT_SYMBOL(drm_default_rgb_quant_range);
+
 static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
 					   const u8 *hdmi)
 {
@@ -4273,6 +4292,52 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+/**
+ * drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
+ *                                        quantization range information
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
+ * @rgb_quant_range: RGB quantization range (Q)
+ * @rgb_quant_range_selectable: Sink supports selectable RGB quantization range (QS)
+ */
+void
+drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+				   const struct drm_display_mode *mode,
+				   enum hdmi_quantization_range rgb_quant_range,
+				   bool rgb_quant_range_selectable)
+{
+	/*
+	 * CEA-861:
+	 * "A Source shall not send a non-zero Q value that does not correspond
+	 * to the default RGB Quantization Range for the transmitted Picture
+	 * unless the Sink indicates support for the Q bit in a Video
+	 * Capabilities Data Block."
+	 *
+	 * HDMI 2.0 recommends sending non-zero Q when it does match the
+	 * default RGB quantization range for the mode, even when QS=0.
+	 */
+	if (rgb_quant_range_selectable ||
+	    rgb_quant_range == drm_default_rgb_quant_range(mode))
+		frame->quantization_range = rgb_quant_range;
+	else
+		frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+
+	/*
+	 * CEA-861-F:
+	 * "When transmitting any RGB colorimetry, the Source should set the
+	 * YQ-field to match the RGB Quantization Range being transmitted
+	 * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
+	 * set YQ=1) and the Sink shall ignore the YQ-field."
+	 */
+	if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+		frame->ycc_quantization_range =
+			HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+	else
+		frame->ycc_quantization_range =
+			HDMI_YCC_QUANTIZATION_RANGE_FULL;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+
 static enum hdmi_3d_structure
 s3d_structure_from_display_mode(const struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 487cfe3989e8..129450713bb7 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -98,7 +98,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
 *
 * Initialises a preallocated encoder. Encoder should be subclassed as part of
 * driver encoder objects. At driver unload time drm_encoder_cleanup() should be
- * called from the driver's destroy hook in &drm_encoder_funcs.
+ * called from the driver's &drm_encoder_funcs.destroy hook.
 *
 * Returns:
 * Zero on success, error code on failure.
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 4484785cd9ac..cf804389f5ec 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -43,7 +43,7 @@
 * &drm_encoder_slave. The @slave_funcs field will be initialized with
 * the hooks provided by the slave driver.
 *
- * If @info->platform_data is non-NULL it will be used as the initial
+ * If @info.platform_data is non-NULL it will be used as the initial
 * slave config.
 *
 * Returns 0 on success or a negative errno on failure, in particular,
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 20a4011f790d..0ef8b284a4b8 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -473,8 +473,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
 	return 0;
 
 err_cma_destroy:
-	drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
-	drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+	drm_framebuffer_remove(&fbdev_cma->fb->fb);
 err_fb_info_destroy:
 	drm_fb_helper_release_fbi(helper);
 err_gem_free_object:
@@ -574,10 +573,8 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
 	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
-	if (fbdev_cma->fb) {
-		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
-		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
-	}
+	if (fbdev_cma->fb)
+		drm_framebuffer_remove(&fbdev_cma->fb->fb);
 
 	drm_fb_helper_fini(&fbdev_cma->fb_helper);
 	kfree(fbdev_cma);
@@ -625,3 +622,21 @@ void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
 		drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
 }
 EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
+
+/**
+ * drm_fbdev_cma_set_suspend_unlocked - wrapper around
+ *                                      drm_fb_helper_set_suspend_unlocked
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend_unlocked, which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
+					int state)
+{
+	if (fbdev_cma)
+		drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
+						   state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0ab6aaacb7d6..c7fafa175755 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -66,11 +66,11 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
 * Teardown is done with drm_fb_helper_fini().
 *
 * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
- * They should also notify the fb helper code from updates to the output
- * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their &drm_driver.lastclose
+ * callback. They should also notify the fb helper code of updates to the
+ * output configuration by calling drm_fb_helper_hotplug_event(). For easier
 * integration with the output polling code in drm_crtc_helper.c the modeset
- * code provides a ->output_poll_changed callback.
+ * code provides a &drm_mode_config_funcs.output_poll_changed callback.
 *
 * All other functions exported by the fb helper library can be used to
 * implement the fbdev driver interface by the driver.
@@ -79,7 +79,7 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
 * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
 * helper must be called first to initialize the minimum required to make
 * hotplug detection work. Drivers also need to make sure to properly set up
- * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init()
 * it is safe to enable interrupts and start processing hotplug events.
At the * same time, drivers should initialize all modeset objects such as CRTCs, * encoders and connectors. To finish up the fbdev helper initialization, the @@ -88,9 +88,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * should call drm_fb_helper_single_add_all_connectors() followed by * drm_fb_helper_initial_config(). * - * If &drm_framebuffer_funcs ->dirty is set, the + * If &drm_framebuffer_funcs.dirty is set, the * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will - * accumulate changes and schedule &drm_fb_helper ->dirty_work to run right + * accumulate changes and schedule &drm_fb_helper.dirty_work to run right * away. This worker then calls the dirty() function ensuring that it will * always run in process context since the fb_*() function could be running in * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io @@ -247,7 +247,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) } /** - * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter + * drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter * @info: fbdev registered by the helper */ int drm_fb_helper_debug_enter(struct fb_info *info) @@ -296,7 +296,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) } /** - * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave + * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave * @info: fbdev registered by the helper */ int drm_fb_helper_debug_leave(struct fb_info *info) @@ -445,7 +445,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: fbcon to restore * - * This should be called from driver's drm ->lastclose callback + * This should be called from driver's drm &drm_driver.lastclose callback * when implementing an fbcon on top of kms using this helper. This ensures that * the user isn't greeted with a black screen when e.g. X dies. * @@ -585,7 +585,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) } /** - * drm_fb_helper_blank - implementation for ->fb_blank + * drm_fb_helper_blank - implementation for &fb_ops.fb_blank * @blank: desired blanking state * @info: fbdev registered by the helper */ @@ -912,7 +912,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, * @info: fb_info struct pointer * @pagelist: list of dirty mmap framebuffer pages * - * This function is used as the &fb_deferred_io ->deferred_io + * This function is used as the &fb_deferred_io.deferred_io * callback function for flushing the fbdev mmap writes. */ void drm_fb_helper_deferred_io(struct fb_info *info, @@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(drm_fb_helper_set_suspend); * due to all the printk activity. * * This function can be called multiple times with the same state since - * &fb_info->state is checked to see if fbdev is running or not before locking. + * &fb_info.state is checked to see if fbdev is running or not before locking. * * Use drm_fb_helper_set_suspend() if you need to take the lock yourself. 
*/ @@ -1181,7 +1181,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, } /** - * drm_fb_helper_setcmap - implementation for ->fb_setcmap + * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap * @cmap: cmap to set * @info: fbdev registered by the helper */ @@ -1238,7 +1238,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) EXPORT_SYMBOL(drm_fb_helper_setcmap); /** - * drm_fb_helper_check_var - implementation for ->fb_check_var + * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var * @var: screeninfo to check * @info: fbdev registered by the helper */ @@ -1338,7 +1338,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, EXPORT_SYMBOL(drm_fb_helper_check_var); /** - * drm_fb_helper_set_par - implementation for ->fb_set_par + * drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par * @info: fbdev registered by the helper * * This will let fbcon do the mode init and is called at initialization time by @@ -1422,7 +1422,7 @@ backoff: } /** - * drm_fb_helper_pan_display - implementation for ->fb_pan_display + * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display * @var: updated screen information * @info: fbdev registered by the helper */ @@ -1607,7 +1607,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, * additional constraints need to set up their own limits. * * Drivers should call this (or their equivalent setup code) from their - * ->fb_probe callback. + * &drm_fb_helper_funcs.fb_probe callback. */ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth) @@ -1636,11 +1636,11 @@ EXPORT_SYMBOL(drm_fb_helper_fill_fix); * @fb_height: desired fb height * * Sets up the variable fbdev metainformation from the given fb helper instance - * and the drm framebuffer allocated in fb_helper->fb. + * and the drm framebuffer allocated in &drm_fb_helper.fb. * * Drivers should call this (or their equivalent setup code) from their - * ->fb_probe callback after having allocated the fbdev backing - * storage framebuffer. + * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev + * backing storage framebuffer. */ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) @@ -2207,9 +2207,9 @@ out: * Note that this also registers the fbdev and so allows userspace to call into * the driver through the fbdev interfaces. * - * This function will call down into the ->fb_probe callback to let - * the driver allocate and initialize the fbdev info structure and the drm - * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and + * This function will call down into the &drm_fb_helper_funcs.fb_probe callback + * to let the driver allocate and initialize the fbdev info structure and the + * drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and * drm_fb_helper_fill_fix() are provided as helpers to setup simple default * values for the fbdev info structure. * diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index e22645375e60..afdf5b147f39 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -580,7 +580,7 @@ EXPORT_SYMBOL(drm_poll); * kmalloc and @p must be the first member element. * * This is the locked version of drm_event_reserve_init() for callers which - * already hold dev->event_lock. + * already hold &drm_device.event_lock. 
* * RETURNS: * @@ -621,7 +621,7 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked); * If callers embedded @p into a larger structure it must be allocated with * kmalloc and @p must be the first member element. * - * Callers which already hold dev->event_lock should use + * Callers which already hold &drm_device.event_lock should use * drm_event_reserve_init_locked() instead. * * RETURNS: @@ -677,7 +677,7 @@ EXPORT_SYMBOL(drm_event_cancel_free); * * This function sends the event @e, initialized with drm_event_reserve_init(), * to its associated userspace DRM file. Callers must already hold - * dev->event_lock, see drm_send_event() for the unlocked version. + * &drm_device.event_lock, see drm_send_event() for the unlocked version. * * Note that the core will take care of unlinking and disarming events when the * corresponding DRM file is closed. Drivers need not worry about whether the @@ -717,8 +717,9 @@ EXPORT_SYMBOL(drm_send_event_locked); * @e: DRM event to deliver * * This function sends the event @e, initialized with drm_event_reserve_init(), - * to its associated userspace DRM file. This function acquires dev->event_lock, - * see drm_send_event_locked() for callers which already hold this lock. + * to its associated userspace DRM file. This function acquires + * &drm_device.event_lock, see drm_send_event_locked() for callers which already + * hold this lock. * * Note that the core will take care of unlinking and disarming events when the * corresponding DRM file is closed. Drivers need not worry about whether the diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 588ccc3a2218..28a0108a1ab8 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -58,8 +58,8 @@ * fbdev framebuffer when the struct &struct drm_framebuffer is embedded into * the fbdev helper struct) drivers can manually clean up a framebuffer at * module unload time with drm_framebuffer_unregister_private(). But doing this - * is not recommended, and it's better to have a normal free-standing struct - * &drm_framebuffer. + * is not recommended, and it's better to have a normal free-standing &struct + * drm_framebuffer. */ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y, @@ -470,7 +470,7 @@ int drm_mode_getfb(struct drm_device *dev, * usb display-link, mipi manual update panels or edp panel self refresh modes. * * Modesetting drivers which always update the frontbuffer do not need to - * implement the corresponding ->dirty framebuffer callback. + * implement the corresponding &drm_framebuffer_funcs.dirty callback. * * Called by the user via ioctl. * @@ -709,10 +709,10 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); * @fb: framebuffer to remove * * Cleanup framebuffer. This function is intended to be used from the drivers - * ->destroy callback. It can also be used to clean up driver private - * framebuffers embedded into a larger structure. + * &drm_framebuffer_funcs.destroy callback. It can also be used to clean up + * driver private framebuffers embedded into a larger structure. * - * Note that this function does not remove the fb from active usuage - if it is + * Note that this function does not remove the fb from active usage - if it is * still used anywhere, hilarity can ensue since userspace could call getfb on * the id and get back -EINVAL. Obviously no concern at driver unload time. 
 *
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 465bacd0a630..bc93de308673 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -316,8 +316,8 @@ EXPORT_SYMBOL(drm_gem_handle_delete);
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
- * This implements the ->dumb_destroy kms driver callback for drivers which use
- * gem to manage their backing storage.
+ * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
+ * which use gem to manage their backing storage.
 */
 int drm_gem_dumb_destroy(struct drm_file *file,
 			 struct drm_device *dev,
@@ -333,9 +333,9 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
- * This expects the dev->object_name_lock to be held already and will drop it
- * before returning. Used to avoid races in establishing new handles when
- * importing an object from either an flink name or a dma-buf.
+ * This expects the &drm_device.object_name_lock to be held already and will
+ * drop it before returning. Used to avoid races in establishing new handles
+ * when importing an object from either an flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
@@ -447,8 +447,8 @@ EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
- * the virtual size differs from the physical size (ie. obj->size). Otherwise
- * just use drm_gem_create_mmap_offset().
+ * the virtual size differs from the physical size (ie. &drm_gem_object.size).
+ * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
- * Must be called holding &drm_device->struct_mutex.
+ * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
@@ -813,7 +813,7 @@ EXPORT_SYMBOL(drm_gem_object_free);
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
- * dev->struct_mutex lock when calling this function.
+ * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_unreference().
 */
@@ -840,9 +840,9 @@ EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
 * drm_gem_object_unreference - release a GEM BO reference
 * @obj: GEM buffer object
 *
- * This releases a reference to @obj. Callers must hold the dev->struct_mutex
- * lock when calling this function, even when the driver doesn't use
- * dev->struct_mutex for anything.
+ * This releases a reference to @obj. Callers must hold the
+ * &drm_device.struct_mutex lock when calling this function, even when the
+ * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_unreference_unlocked() instead.
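To make the locking rules above concrete, here is a sketch of the two unreference paths (the foo_ wrappers are hypothetical, for illustration only):

/* No locks held: use the unlocked variant. */
static void foo_bo_put(struct drm_gem_object *obj)
{
	drm_gem_object_unreference_unlocked(obj);
}

/* Legacy path, already running under struct_mutex. */
static void foo_bo_put_locked(struct drm_device *dev,
			      struct drm_gem_object *obj)
{
	lockdep_assert_held(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
}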
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index a6213f814345..f37388cb2fde 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -31,6 +31,7 @@ void drm_lastclose(struct drm_device *dev); /* drm_pci.c */ int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); +void drm_pci_agp_destroy(struct drm_device *dev); /* drm_prime.c */ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 88c69e71102e..e06cf11ebb4a 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -95,7 +95,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, * * Only to be called from drm_crtc_vblank_on(). * - * Note: caller must hold dev->vbl_lock since this reads & writes + * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. */ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe) @@ -142,7 +142,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe * Only necessary when going from off->on, to account for frames we * didn't get an interrupt for. * - * Note: caller must hold dev->vbl_lock since this reads & writes + * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. */ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, @@ -415,29 +415,6 @@ err: } EXPORT_SYMBOL(drm_vblank_init); -static void drm_irq_vgaarb_nokms(void *cookie, bool state) -{ - struct drm_device *dev = cookie; - - if (dev->driver->vgaarb_irq) { - dev->driver->vgaarb_irq(dev, state); - return; - } - - if (!dev->irq_enabled) - return; - - if (state) { - if (dev->driver->irq_uninstall) - dev->driver->irq_uninstall(dev); - } else { - if (dev->driver->irq_preinstall) - dev->driver->irq_preinstall(dev); - if (dev->driver->irq_postinstall) - dev->driver->irq_postinstall(dev); - } -} - /** * drm_irq_install - install IRQ handler * @dev: DRM device @@ -449,7 +426,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state) * * This is the simplified helper interface provided for drivers with no special * needs. Drivers which need to install interrupt handlers for multiple - * interrupts must instead set drm_device->irq_enabled to signal the DRM core + * interrupts must instead set &drm_device.irq_enabled to signal the DRM core * that vblank interrupts are available. * * Returns: @@ -492,9 +469,6 @@ int drm_irq_install(struct drm_device *dev, int irq) return ret; } - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); - /* After installing handler */ if (dev->driver->irq_postinstall) ret = dev->driver->irq_postinstall(dev); @@ -519,7 +493,7 @@ EXPORT_SYMBOL(drm_irq_install); * Calls the driver's irq_uninstall() function and unregisters the IRQ handler. * This should only be called by drivers which used drm_irq_install() to set up * their interrupt handler. Other drivers must only reset - * drm_device->irq_enabled to false. + * &drm_device.irq_enabled to false. * * Note that for kernel modesetting drivers it is a bug if this function fails. * The sanity checks are only to catch buggy user modesetting drivers which call @@ -982,12 +956,11 @@ static void send_vblank_event(struct drm_device *dev, * period. This helper function implements exactly the required vblank arming * behaviour. 
 *
- * NOTE: Drivers using this to send out the event in &struct drm_crtc_state
- * as part of an atomic commit must ensure that the next vblank happens at
- * exactly the same time as the atomic commit is committed to the hardware. This
- * function itself does **not** protect again the next vblank interrupt racing
- * with either this function call or the atomic commit operation. A possible
- * sequence could be:
+ * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
+ * atomic commit must ensure that the next vblank happens at exactly the same
+ * time as the atomic commit is committed to the hardware. This function itself
+ * does **not** protect against the next vblank interrupt racing with either
+ * this function call or the atomic commit operation. A possible sequence
+ * could be:
 *
 * 1. Driver commits new hardware state into vblank-synchronized registers.
 * 2. A vblank happens, committing the hardware state. Also the corresponding
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 229b3f525dee..e51876e588d6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -552,8 +552,8 @@ EXPORT_SYMBOL(drm_mm_replace_node);
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
- * further evictable objects. Eviction roster metadata is tracked in struct
- * &drm_mm_scan.
+ * further evictable objects. Eviction roster metadata is tracked in &struct
+ * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index ed1ee5a44a7b..884cc4d26fb5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -421,7 +421,12 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		drm_connector_unreference(connector);
 	}
 	drm_connector_list_iter_put(&conn_iter);
-	WARN_ON(!list_empty(&dev->mode_config.connector_list));
+	if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
+		drm_connector_list_iter_get(dev, &conn_iter);
+		drm_for_each_connector_iter(connector, &conn_iter)
+			DRM_ERROR("connector %s leaked!\n", connector->name);
+		drm_connector_list_iter_put(&conn_iter);
+	}
 
 	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
 				 head) {
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..a8616b1a8d22 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -797,6 +797,26 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
 EXPORT_SYMBOL(drm_mode_vrefresh);
 
 /**
+ * drm_mode_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
+			    int *hdisplay, int *vdisplay)
+{
+	struct drm_display_mode adjusted = *mode;
+
+	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+	*hdisplay = adjusted.crtc_hdisplay;
+	*vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_get_hv_timing);
+
+/**
 * drm_mode_set_crtcinfo - set CRTC modesetting timing parameters
 * @p: mode
 * @adjust_flags: a combination of adjustment flags
@@ -1460,6 +1480,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 		return NULL;
 
 	mode->type |= DRM_MODE_TYPE_USERDEF;
+	/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 }
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 3551ae31f143..bf60f2645e55 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -33,7 +33,7 @@
 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx. This is provided
- * by drm_modeset_lock / drm_modeset_acquire_ctx.
+ * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
@@ -53,7 +53,7 @@
 * drm_modeset_acquire_fini(&ctx);
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
- * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
@@ -71,7 +71,7 @@ static DEFINE_WW_CLASS(crtc_ww_class);
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
- * stores it in the DRM device's ->mode_config. This facilitate conversion of
+ * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(drm_modeset_lock_all);
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
- * in the DRM device's ->mode_config. This facilitates conversion of existing
+ * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
@@ -468,7 +468,7 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
- * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex
+ * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
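Spelled out, the acquire-context pattern referenced in the drm_modeset_lock.c documentation above looks roughly like this sketch (the CRTC lock is just one example of a &drm_modeset_lock; error handling trimmed):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* ... touch the state protected by crtc->mutex ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);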
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index dc358f860aea..a3b356e70b35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -191,7 +191,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 static void drm_pci_agp_init(struct drm_device *dev)
 {
 	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
-		if (drm_pci_device_is_agp(dev))
+		if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
 			dev->agp = drm_agp_init(dev);
 		if (dev->agp) {
 			dev->agp->agp_mtrr = arch_phys_wc_add(
@@ -223,7 +223,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
 * Try and register, if we fail to register, backout previous work.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your ->load() callback.
+ * drm_dev_register() instead and remove your &drm_driver.load callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index eed66be18329..c464fc4a874d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -42,7 +42,7 @@
 *
 * Cursor and overlay planes are optional. All drivers should provide one
 * primary plane per CRTC to avoid surprising userspace too much. See enum
- * &drm_plane_type for a more in-depth discussion of these special uapi-relevant
+ * drm_plane_type for a more in-depth discussion of these special uapi-relevant
 * plane types. Special planes are associated with their CRTC by calling
 * drm_crtc_init_with_planes().
 *
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 35d43607a47d..148688fb920a 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -39,9 +39,9 @@
 *
 * This helper library has two parts. The first part has support to implement
 * primary plane support on top of the normal CRTC configuration interface.
- * Since the legacy ->set_config interface ties the primary plane together with
- * the CRTC state this does not allow userspace to disable the primary plane
- * itself. To avoid too much duplicated code use
+ * Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
+ * plane together with the CRTC state this does not allow userspace to disable
+ * the primary plane itself. To avoid too much duplicated code use
 * drm_plane_helper_check_update() which can be used to enforce the same
 * restrictions as primary planes had thus. The default primary plane only
 * exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
@@ -384,7 +384,8 @@ EXPORT_SYMBOL(drm_primary_helper_update);
 * is called in response to a userspace SetPlane operation on the plane with a
 * NULL framebuffer parameter. It unconditionally fails the disable call with
 * -EINVAL. The only way to disable the primary plane without driver support is
- * to disable the entier CRTC. Which does not match the plane ->disable hook.
+ * to disable the entire CRTC, which does not match the plane
+ * &drm_plane_funcs.disable_plane hook.
 *
 * Note that some hardware may be able to disable the primary plane without
 * disabling the whole CRTC. Drivers for such hardware should provide their
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7af3005a030c..56d2f93ed6b9 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -74,7 +74,7 @@ err_free:
 * .load() function.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your ->load() callback.
+ * drm_dev_register() instead and remove your &drm_driver.load callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 8d77b2462594..25aa4558f1b5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -40,8 +40,11 @@
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference is released when the dma_buf
- * object goes away in the driver .release function.
+ * the dma_buf priv. This reference needs to be released when the
+ * final reference to the &dma_buf itself is dropped and its
+ * &dma_buf_ops.release function is called. For GEM-based drivers,
+ * the dma_buf should be exported using drm_gem_dmabuf_export() and
+ * then released by drm_gem_dmabuf_release().
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
@@ -51,6 +54,16 @@
 * when the imported object is destroyed, we remove the attachment
 * and drop the reference to the dma_buf.
 *
+ * When all the references to the &dma_buf are dropped, i.e. when
+ * userspace has closed both handles to the imported GEM object (through the
+ * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
+ * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
+ * are also gone, then the dma_buf gets destroyed. This can also happen as a
+ * part of the clean up procedure in the drm_release() function if userspace
+ * fails to properly clean up. Note that both the kernel and userspace (by
+ * keeping the PRIME file descriptors open) can hold references onto a
+ * &dma_buf.
+ *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
@@ -291,7 +304,7 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
- * exp_info->priv) which is released by drm_gem_dmabuf_release().
+ * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 97a32898ef50..93381454bdf7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -43,7 +43,7 @@
 * DOC: output probing helper overview
 *
 * This library provides some helper code for output probing. It provides an
- * implementation of the core connector->fill_modes interface with
+ * implementation of the core &drm_connector_funcs.fill_modes interface with
 * drm_helper_probe_single_connector_modes.
 *
 * It also provides support for polling connectors with a work item and for
@@ -115,25 +115,28 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 
 /**
- * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * drm_kms_helper_poll_enable - re-enable output polling.
 * @dev: drm_device
 *
- * This function re-enables the output polling work without
- * locking the mode_config mutex.
+ * This function re-enables the output polling work, after it has been
+ * temporarily disabled using drm_kms_helper_poll_disable(), for example over
+ * suspend/resume.
 *
- * This is like drm_kms_helper_poll_enable() however it is to be
- * called from a context where the mode_config mutex is locked
- * already.
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
 */
-void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
 	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
 		return;
 
@@ -146,14 +149,24 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 	drm_connector_list_iter_put(&conn_iter);
 
 	if (dev->mode_config.delayed_event) {
+		/*
+		 * FIXME:
+		 *
+		 * Use short (1s) delay to handle the initial delayed event.
+		 * This delay should not be needed, but Optimus/nouveau will
+		 * fail in a mysterious way if the delayed event is handled as
+		 * soon as possible like it is done in
+		 * drm_helper_probe_single_connector_modes() in case the poll
+		 * was enabled before.
+		 */
 		poll = true;
-		delay = 0;
+		delay = HZ;
 	}
 
 	if (poll)
 		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
-EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 static enum drm_connector_status
 drm_connector_detect(struct drm_connector *connector, bool force)
@@ -174,9 +187,9 @@ drm_connector_detect(struct drm_connector *connector, bool force)
 * be added to the connector's probed_modes list, then culled (based on validity
 * and the @maxX, @maxY parameters) and put into the normal modes list.
 *
- * Intended to be used as a generic implementation of the ->fill_modes()
- * @connector vfunc for drivers that use the CRTC helpers for output mode
- * filtering and detection.
+ * Intended to be used as a generic implementation of the
+ * &drm_connector_funcs.fill_modes() vfunc for drivers that use the CRTC helpers
+ * for output mode filtering and detection.
 *
 * The basic procedure is as follows:
 *
@@ -188,7 +201,7 @@ drm_connector_detect(struct drm_connector *connector, bool force)
 *
 * - debugfs 'override_edid' (used for testing only)
 * - firmware EDID (drm_load_edid_firmware())
- * - connector helper ->get_modes() vfunc
+ * - &drm_connector_helper_funcs.get_modes vfunc
 * - if the connector status is connector_status_connected, standard
 *   VESA DMT modes up to 1024x768 are automatically added
 *   (drm_add_modes_noedid())
@@ -209,8 +222,8 @@ drm_connector_detect(struct drm_connector *connector, bool force)
 *   (if specified)
 * - drm_mode_validate_flag() checks the modes against basic connector
 *   capabilities (interlace_allowed, doublescan_allowed, stereo_allowed)
- * - the optional connector ->mode_valid() helper can perform driver and/or
- *   hardware specific checks
+ * - the optional &drm_connector_helper_funcs.mode_valid helper can perform
+ *   driver and/or hardware specific checks
 *
 * 5.
 *    Any mode whose status is not OK is pruned from the connector's modes list,
 *    accompanied by a debug message indicating the reason for the mode's
@@ -280,7 +293,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable_locked(dev);
+		drm_kms_helper_poll_enable(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -474,8 +487,12 @@ out:
 * This function disables the output polling work.
 *
 * Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or arlready
- * disabled.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
 */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
@@ -486,24 +503,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
 /**
- * drm_kms_helper_poll_enable - re-enable output polling.
- * @dev: drm_device
- *
- * This function re-enables the output polling work.
- *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
-{
-	mutex_lock(&dev->mode_config.mutex);
-	drm_kms_helper_poll_enable_locked(dev);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-
-/**
 * drm_kms_helper_poll_init - initialize and enable output polling
 * @dev: drm_device
 *
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 0d0e5dc0ee23..7fc070f3e49e 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -42,8 +42,8 @@
 * drm_object_attach_property().
 *
 * Property values are only 64bit. To support bigger piles of data (like gamma
- * tables, color correction matrizes or large structures) a property can instead
- * point at a &drm_property_blob with that additional data
+ * tables, color correction matrices or large structures) a property can instead
+ * point at a &drm_property_blob with that additional data.
 *
 * Properties are defined by their symbolic name, userspace must keep a
 * per-object mapping from those names to the property ID used in the atomic
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 9a37196c1bf1..513288b5c2f6 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -255,7 +255,7 @@ static const struct attribute_group *connector_dev_groups[] = {
 * @connector: connector to add
 *
 * Create a connector device in sysfs, along with its associated connector
- * properties (so far, connection status, dpms, mode list & edid) and
+ * properties (so far, connection status, dpms, mode list and edid) and
 * generate a hotplug event so userspace knows there's a new connector
 * available.
*/ diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 2cde7a5442fb..cc1731c5289c 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -2,7 +2,8 @@ config DRM_ETNAVIV tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" depends on DRM - depends on ARCH_MXC || ARCH_DOVE + depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) + depends on MMU select SHMEM select TMPFS select IOMMU_API diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index b92c24e07cea..590be0d1dd95 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -258,12 +258,6 @@ static int etnaviv_debugfs_init(struct drm_minor *minor) return ret; } - -static void etnaviv_debugfs_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(etnaviv_debugfs_list, - ARRAY_SIZE(etnaviv_debugfs_list), minor); -} #endif /* @@ -509,7 +503,6 @@ static struct drm_driver etnaviv_drm_driver = { .gem_prime_mmap = etnaviv_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = etnaviv_debugfs_init, - .debugfs_cleanup = etnaviv_debugfs_cleanup, #endif .ioctls = etnaviv_ioctls, .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index ae2733a609ba..f503af462dad 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -117,9 +117,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, struct list_head list; bool found; + /* + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick + * drm_mm into giving out a low IOVA after address space + * rollover. This needs a proper fix. + */ ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, mmu->last_iova, ~0UL, - DRM_MM_SEARCH_DEFAULT); + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); if (ret != -ENOSPC) break; diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index d706ca4e2f02..1d185347c64c 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -19,7 +19,6 @@ comment "CRTCs" config DRM_EXYNOS_FIMD bool "FIMD" depends on !FB_S3C - select FB_MODE_HELPERS select MFD_SYSCON help Choose this option if you want to use Exynos FIMD for DRM. @@ -32,7 +31,6 @@ config DRM_EXYNOS5433_DECON config DRM_EXYNOS7_DECON bool "DECON on Exynos7" depends on !FB_S3C - select FB_MODE_HELPERS help Choose this option if you want to use Exynos DECON for DRM. 
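The etnaviv_mmu.c hunk above trades off where drm_mm starts searching: after an address-space rollover, last_iova is zero and handing out low IOVAs again avoids immediately colliding with still-live high mappings. A small illustrative wrapper (sketch_alloc_iova() is made up; it merely mirrors the call in the hunk) shows the heuristic in isolation:

static int sketch_alloc_iova(struct drm_mm *mm, struct drm_mm_node *node,
			     u64 size, u64 last_iova)
{
	/*
	 * last_iova == 0 means the address space just rolled over;
	 * DRM_MM_SEARCH_BELOW then asks drm_mm for a low address,
	 * matching the XXX workaround in etnaviv_iommu_find_iova().
	 */
	enum drm_mm_search_flags flags =
		last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW;

	return drm_mm_insert_node_in_range(mm, node, size, 0,
					   last_iova, ~0UL, flags);
}

As the XXX comment itself says, this is a stopgap rather than a proper fix; the search flag only biases the allocator, it does not guarantee rollover-safe reuse.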
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index c5c01628c715..d69af00bdd6a 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -46,7 +46,8 @@ enum decon_flag_bits { BIT_CLKS_ENABLED, BIT_IRQS_ENABLED, BIT_WIN_UPDATED, - BIT_SUSPENDED + BIT_SUSPENDED, + BIT_REQUEST_UPDATE }; struct decon_context { @@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) m->crtc_vsync_end = m->crtc_vsync_start + 1; } - decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); - - /* enable clock gate */ - val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; - writel(val, ctx->addr + DECON_CMU); - if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) decon_setup_trigger(ctx); @@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, /* window enable */ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_disable_plane(struct exynos_drm_crtc *crtc, @@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, return; decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); } static void decon_atomic_flush(struct exynos_drm_crtc *crtc) @@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) for (i = ctx->first_win; i < WINDOWS_NR; i++) decon_shadow_protect_win(ctx, i, false); - /* standalone update */ - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); if (ctx->out_type & IFTYPE_I80) set_bit(BIT_WIN_UPDATED, &ctx->flags); @@ -470,7 +467,7 @@ err: clk_disable_unprepare(ctx->clks[i]); } -static struct exynos_drm_crtc_ops decon_crtc_ops = { +static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, .enable_vblank = decon_enable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 2530bf57716a..5367b6664fe3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -39,6 +39,14 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) if (exynos_crtc->ops->disable) exynos_crtc->ops->disable(exynos_crtc); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } } static void @@ -109,9 +117,6 @@ static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct exynos_drm_private *private = crtc->dev->dev_private; - - private->crtc[exynos_crtc->pipe] = NULL; drm_crtc_cleanup(crtc); kfree(exynos_crtc); @@ -134,7 +139,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, void *ctx) { struct exynos_drm_crtc *exynos_crtc; - struct exynos_drm_private *private = drm_dev->dev_private; struct drm_crtc *crtc; int ret; @@ -149,8 +153,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, crtc = &exynos_crtc->base; - private->crtc[pipe] = crtc; - ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL, &exynos_crtc_funcs, NULL); if (ret < 0) @@ -209,23 +211,3 @@ void 
exynos_drm_crtc_te_handler(struct drm_crtc *crtc) if (exynos_crtc->ops->te_handler) exynos_crtc->ops->te_handler(exynos_crtc); } - -void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, - struct drm_file *file) -{ - struct drm_pending_vblank_event *e; - unsigned long flags; - - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - e = crtc->state->event; - if (e && e->base.file_priv == file) - crtc->state->event = NULL; - else - e = NULL; - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - if (e) - drm_event_cancel_free(crtc->dev, &e->base); -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index cfdcf3e4eb1b..6a581a8af465 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -40,8 +40,4 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, */ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); -/* This function cancels a page flip request. */ -void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, - struct drm_file *file); - #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 3ec053542e93..035d02ecffcd 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -38,56 +38,6 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -struct exynos_atomic_commit { - struct work_struct work; - struct drm_device *dev; - struct drm_atomic_state *state; - u32 crtcs; -}; - -static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) -{ - struct drm_device *dev = commit->dev; - struct exynos_drm_private *priv = dev->dev_private; - struct drm_atomic_state *state = commit->state; - - drm_atomic_helper_commit_modeset_disables(dev, state); - - drm_atomic_helper_commit_modeset_enables(dev, state); - - /* - * Exynos can't update planes with CRTCs and encoders disabled, - * its updates routines, specially for FIMD, requires the clocks - * to be enabled. So it is necessary to handle the modeset operations - * *before* the commit_planes() step, this way it will always - * have the relevant clocks enabled to perform the update. 
- */ - - drm_atomic_helper_commit_planes(dev, state, 0); - - drm_atomic_helper_wait_for_vblanks(dev, state); - - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_put(state); - - spin_lock(&priv->lock); - priv->pending &= ~commit->crtcs; - spin_unlock(&priv->lock); - - wake_up_all(&priv->wait); - - kfree(commit); -} - -static void exynos_drm_atomic_work(struct work_struct *work) -{ - struct exynos_atomic_commit *commit = container_of(work, - struct exynos_atomic_commit, work); - - exynos_atomic_commit_complete(commit); -} - static struct device *exynos_drm_get_dma_device(void); static int exynos_drm_load(struct drm_device *dev, unsigned long flags) @@ -202,65 +152,6 @@ static void exynos_drm_unload(struct drm_device *dev) dev->dev_private = NULL; } -static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs) -{ - bool pending; - - spin_lock(&priv->lock); - pending = priv->pending & crtcs; - spin_unlock(&priv->lock); - - return pending; -} - -int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, - bool nonblock) -{ - struct exynos_drm_private *priv = dev->dev_private; - struct exynos_atomic_commit *commit; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - int i, ret; - - commit = kzalloc(sizeof(*commit), GFP_KERNEL); - if (!commit) - return -ENOMEM; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) { - kfree(commit); - return ret; - } - - /* This is the point of no return */ - - INIT_WORK(&commit->work, exynos_drm_atomic_work); - commit->dev = dev; - commit->state = state; - - /* Wait until all affected CRTCs have completed previous commits and - * mark them as pending. - */ - for_each_crtc_in_state(state, crtc, crtc_state, i) - commit->crtcs |= drm_crtc_mask(crtc); - - wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); - - spin_lock(&priv->lock); - priv->pending |= commit->crtcs; - spin_unlock(&priv->lock); - - drm_atomic_helper_swap_state(state, true); - - drm_atomic_state_get(state); - if (nonblock) - schedule_work(&commit->work); - else - exynos_atomic_commit_complete(commit); - - return 0; -} - int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -307,12 +198,7 @@ err_file_priv_free: static void exynos_drm_preclose(struct drm_device *dev, struct drm_file *file) { - struct drm_crtc *crtc; - exynos_drm_subdrv_close(dev, file); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - exynos_drm_crtc_cancel_page_flip(crtc, file); } static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 80c4d5b81689..cf6e08cb35a7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -211,12 +211,6 @@ struct drm_exynos_file_private { struct exynos_drm_private { struct drm_fb_helper *fb_helper; - /* - * created crtc object would be contained at this array and - * this array is used to be aware of which crtc did it request vblank. 
- */ - struct drm_crtc *crtc[MAX_CRTC]; - struct device *dma_dev; void *mapping; @@ -231,9 +225,9 @@ struct exynos_drm_private { static inline struct exynos_drm_crtc * exynos_drm_crtc_from_pipe(struct drm_device *dev, int pipe) { - struct exynos_drm_private *private = dev->dev_private; + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); - return to_exynos_crtc(private->crtc[pipe]); + return to_exynos_crtc(crtc); } static inline struct device *to_dma_dev(struct drm_device *dev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 68d414227533..c77a5aced81a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -187,11 +187,40 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) return exynos_fb->dma_addr[index]; } +static void exynos_drm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + /* + * Exynos can't update planes with CRTCs and encoders disabled, + * its updates routines, specially for FIMD, requires the clocks + * to be enabled. So it is necessary to handle the modeset operations + * *before* the commit_planes() step, this way it will always + * have the relevant clocks enabled to perform the update. + */ + drm_atomic_helper_commit_planes(dev, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { + .atomic_commit_tail = exynos_drm_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { .fb_create = exynos_user_fb_create, .output_poll_changed = exynos_drm_output_poll_changed, .atomic_check = exynos_atomic_check, - .atomic_commit = exynos_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; void exynos_drm_mode_config_init(struct drm_device *dev) @@ -208,4 +237,5 @@ void exynos_drm_mode_config_init(struct drm_device *dev) dev->mode_config.max_height = 4096; dev->mode_config.funcs = &exynos_drm_mode_config_funcs; + dev->mode_config.helper_private = &exynos_drm_mode_config_helpers; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index d8808158d418..a7884bea42eb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -270,10 +270,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { fb = fb_helper->fb; - if (fb) { - drm_framebuffer_unregister_private(fb); + if (fb) drm_framebuffer_remove(fb); - } } drm_fb_helper_unregister_fbi(fb_helper); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index fbd13fabdf2d..603d8425cca6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1193,6 +1193,17 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, if (!node) return -ENOMEM; + /* + * To avoid an integer overflow for the later size computations, we + * enforce a maximum number of submitted commands here. This limit is + * sufficient for all conceivable usage cases of the G2D. 
+ */ + if (req->cmd_nr > G2D_CMDLIST_DATA_NUM || + req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) { + dev_err(dev, "number of submitted G2D commands exceeds limit\n"); + return -EINVAL; + } + node->event = NULL; if (req->event_type != G2D_EVENT_NOT) { @@ -1250,7 +1261,11 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; } - /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ + /* + * Check the size of cmdlist. The 2 that is added last comes from + * the implicit G2D_BITBLT_START that is appended once we have + * checked all the submitted commands. + */ size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2; if (size > G2D_CMDLIST_DATA_NUM) { dev_err(dev, "cmdlist size is too big\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index a0def0be6d65..2ef43d403eaa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -19,6 +19,7 @@ #include <linux/of_graph.h> #include <linux/clk.h> #include <linux/component.h> +#include <linux/pm_runtime.h> #include <drm/drmP.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> @@ -269,35 +270,9 @@ static int parse_dt(struct exynos_mic *mic) } nodes[j++] = remote_node; - switch (i) { - case ENDPOINT_DECON_NODE: - /* decon node */ - if (of_get_child_by_name(remote_node, - "i80-if-timings")) - mic->i80_mode = 1; - - break; - case ENDPOINT_DSI_NODE: - /* panel node */ - remote_node = get_remote_node(remote_node, 1); - if (!remote_node) { - ret = -EPIPE; - goto exit; - } - nodes[j++] = remote_node; - - ret = of_get_videomode(remote_node, - &mic->vm, 0); - if (ret) { - DRM_ERROR("mic: failed to get videomode"); - goto exit; - } - - break; - default: - DRM_ERROR("mic: Unknown endpoint from MIC"); - break; - } + if (i == ENDPOINT_DECON_NODE && + of_get_child_by_name(remote_node, "i80-if-timings")) + mic->i80_mode = 1; } exit: @@ -312,7 +287,6 @@ static void mic_disable(struct drm_bridge *bridge) { } static void mic_post_disable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; - int i; mutex_lock(&mic_mutex); if (!mic->enabled) @@ -320,39 +294,43 @@ static void mic_post_disable(struct drm_bridge *bridge) mic_set_path(mic, 0); - for (i = NUM_CLKS - 1; i > -1; i--) - clk_disable_unprepare(mic->clks[i]); - + pm_runtime_put(mic->dev); mic->enabled = 0; already_disabled: mutex_unlock(&mic_mutex); } +static void mic_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct exynos_mic *mic = bridge->driver_private; + + mutex_lock(&mic_mutex); + drm_display_mode_to_videomode(mode, &mic->vm); + mutex_unlock(&mic_mutex); +} + static void mic_pre_enable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; - int ret, i; + int ret; mutex_lock(&mic_mutex); if (mic->enabled) - goto already_enabled; + goto unlock; - for (i = 0; i < NUM_CLKS; i++) { - ret = clk_prepare_enable(mic->clks[i]); - if (ret < 0) { - DRM_ERROR("Failed to enable clock (%s)\n", - clk_names[i]); - goto turn_off_clks; - } - } + ret = pm_runtime_get_sync(mic->dev); + if (ret < 0) + goto unlock; mic_set_path(mic, 1); ret = mic_sw_reset(mic); if (ret) { DRM_ERROR("Failed to reset\n"); - goto turn_off_clks; + goto turn_off; } if (!mic->i80_mode) @@ -365,10 +343,9 @@ static void mic_pre_enable(struct drm_bridge *bridge) return; -turn_off_clks: - while (--i > -1) - clk_disable_unprepare(mic->clks[i]); 
-already_enabled: +turn_off: + pm_runtime_put(mic->dev); +unlock: mutex_unlock(&mic_mutex); } @@ -377,6 +354,7 @@ static void mic_enable(struct drm_bridge *bridge) { } static const struct drm_bridge_funcs mic_bridge_funcs = { .disable = mic_disable, .post_disable = mic_post_disable, + .mode_set = mic_mode_set, .pre_enable = mic_pre_enable, .enable = mic_enable, }; @@ -401,14 +379,12 @@ static void exynos_mic_unbind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); - int i; mutex_lock(&mic_mutex); if (!mic->enabled) goto already_disabled; - for (i = NUM_CLKS - 1; i > -1; i--) - clk_disable_unprepare(mic->clks[i]); + pm_runtime_put(mic->dev); already_disabled: mutex_unlock(&mic_mutex); @@ -421,6 +397,41 @@ static const struct component_ops exynos_mic_component_ops = { .unbind = exynos_mic_unbind, }; +#ifdef CONFIG_PM +static int exynos_mic_suspend(struct device *dev) +{ + struct exynos_mic *mic = dev_get_drvdata(dev); + int i; + + for (i = NUM_CLKS - 1; i > -1; i--) + clk_disable_unprepare(mic->clks[i]); + + return 0; +} + +static int exynos_mic_resume(struct device *dev) +{ + struct exynos_mic *mic = dev_get_drvdata(dev); + int ret, i; + + for (i = 0; i < NUM_CLKS; i++) { + ret = clk_prepare_enable(mic->clks[i]); + if (ret < 0) { + DRM_ERROR("Failed to enable clock (%s)\n", + clk_names[i]); + while (--i > -1) + clk_disable_unprepare(mic->clks[i]); + return ret; + } + } + return 0; +} +#endif + +static const struct dev_pm_ops exynos_mic_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL) +}; + static int exynos_mic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -473,9 +484,18 @@ static int exynos_mic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mic); + pm_runtime_enable(dev); + + ret = component_add(dev, &exynos_mic_component_ops); + if (ret) + goto err_pm; + DRM_DEBUG_KMS("MIC has been probed\n"); - return component_add(dev, &exynos_mic_component_ops); + return 0; + +err_pm: + pm_runtime_disable(dev); err: return ret; } @@ -483,6 +503,7 @@ err: static int exynos_mic_remove(struct platform_device *pdev) { component_del(&pdev->dev, &exynos_mic_component_ops); + pm_runtime_disable(&pdev->dev); return 0; } @@ -497,6 +518,7 @@ struct platform_driver mic_driver = { .remove = exynos_mic_remove, .driver = { .name = "exynos-mic", + .pm = &exynos_mic_pm_ops, .owner = THIS_MODULE, .of_match_table = exynos_mic_of_match, }, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index a106046e0c93..72143ac10525 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -701,7 +701,7 @@ static void vp_win_reset(struct mixer_context *ctx) unsigned int tries = 100; vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); - while (tries--) { + while (--tries) { /* waiting until VP_SRESET_PROCESSING is 0 */ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) break; diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 0dc7ba2fdc22..5ee93ff55608 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -406,11 +406,6 @@ out_err: return ret; } -static int psb_driver_device_is_agp(struct drm_device *dev) -{ - return 0; -} - static inline void get_brightness(struct backlight_device *bd) { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE @@ -487,7 +482,6 @@ static struct drm_driver driver = { .set_busid = drm_pci_set_busid, .num_ioctls = ARRAY_SIZE(psb_ioctls), - 
.device_is_agp = psb_driver_device_is_agp, .irq_preinstall = psb_irq_preinstall, .irq_postinstall = psb_irq_postinstall, .irq_uninstall = psb_irq_uninstall, diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index 7a6957ae4b44..16fe79053ee1 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -121,7 +121,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj); if (IS_ERR(hi_fbdev->fb)) { - ret = PTR_ERR(info); + ret = PTR_ERR(hi_fbdev->fb); DRM_ERROR("failed to initialize framebuffer: %d\n", ret); goto out_release_fbi; } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 3c6f750389fb..20732b62d4c9 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -243,8 +243,6 @@ struct ttm_bo_driver hibmc_bo_driver = { .verify_access = hibmc_bo_verify_access, .io_mem_reserve = &hibmc_ttm_io_mem_reserve, .io_mem_free = NULL, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int hibmc_mm_init(struct hibmc_drm_private *hibmc) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 307d460ab684..9a0678a33e0d 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -304,8 +304,8 @@ static void ade_set_medianoc_qos(struct ade_crtc *acrtc) static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe) { - struct kirin_drm_private *priv = dev->dev_private; - struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]); + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; void __iomem *base = ctx->base; @@ -320,8 +320,8 @@ static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe) static void ade_disable_vblank(struct drm_device *dev, unsigned int pipe) { - struct kirin_drm_private *priv = dev->dev_private; - struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]); + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; void __iomem *base = ctx->base; @@ -575,7 +575,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = { static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *plane) { - struct kirin_drm_private *priv = dev->dev_private; struct device_node *port; int ret; @@ -599,7 +598,6 @@ static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, } drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs); - priv->crtc[drm_crtc_index(crtc)] = crtc; return 0; } diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index a0bb217c4c64..7f60c64915d9 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -20,7 +20,6 @@ struct kirin_dc_ops { }; struct kirin_drm_private { - struct drm_crtc *crtc[MAX_CRTC]; #ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_fbdev_cma *fbdev; #endif diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index ab4e6cbe1f8b..576a417690d4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -1190,6 +1190,14 @@ 
static int i810_flip_bufs(struct drm_device *dev, void *data, int i810_driver_load(struct drm_device *dev, unsigned long flags) { + dev->agp = drm_agp_init(dev); + if (dev->agp) { + dev->agp->agp_mtrr = arch_phys_wc_add( + dev->agp->agp_info.aper_base, + dev->agp->agp_info.aper_size * + 1024 * 1024); + } + /* Our userspace depends upon the agp mapping support. */ if (!dev->agp) return -EINVAL; @@ -1249,19 +1257,3 @@ const struct drm_ioctl_desc i810_ioctls[] = { }; int i810_max_ioctl = ARRAY_SIZE(i810_ioctls); - -/** - * Determine if the device really is AGP or not. - * - * All Intel graphics chipsets are treated as AGP, even if they are really - * PCI-e. - * - * \param dev The device to be tested. - * - * \returns - * A value of 1 is always retured to indictate every i810 is AGP. - */ -int i810_driver_device_is_agp(struct drm_device *dev) -{ - return 1; -} diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 02504a7cfaf2..37fd0906f807 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -60,7 +60,6 @@ static struct drm_driver driver = { .lastclose = i810_driver_lastclose, .preclose = i810_driver_preclose, .set_busid = drm_pci_set_busid, - .device_is_agp = i810_driver_device_is_agp, .dma_quiescent = i810_driver_dma_quiescent, .ioctls = i810_ioctls, .fops = &i810_driver_fops, diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index 93ec5dc4e7d3..c73d2f2da57b 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -124,7 +124,6 @@ extern int i810_driver_load(struct drm_device *, unsigned long flags); extern void i810_driver_lastclose(struct drm_device *dev); extern void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); -extern int i810_driver_device_is_agp(struct drm_device *dev); extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern const struct drm_ioctl_desc i810_ioctls[]; diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 1bb7a5b80d47..7311aeab16f7 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -37,13 +37,6 @@ #include "i915_drv.h" #include "gvt.h" -#define MB_TO_BYTES(mb) ((mb) << 20ULL) -#define BYTES_TO_MB(b) ((b) >> 20ULL) - -#define HOST_LOW_GM_SIZE MB_TO_BYTES(128) -#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) -#define HOST_FENCE 4 - static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; @@ -155,6 +148,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, POSTING_READ(fence_reg_lo); } +static void _clear_vgpu_fence(struct intel_vgpu *vgpu) +{ + int i; + + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, 0); +} + static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; @@ -168,9 +169,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); + _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; - intel_vgpu_write_fence(vgpu, i, 0); list_add_tail(®->link, &dev_priv->mm.fence_list); } @@ -198,13 +199,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) continue; list_del(pos); vgpu->fence.regs[i] = reg; - intel_vgpu_write_fence(vgpu, i, 0); if (++i == vgpu_fence_sz(vgpu)) break; } if (i != vgpu_fence_sz(vgpu)) goto out_free_fence; + _clear_vgpu_fence(vgpu); + 
mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); return 0; @@ -304,6 +306,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) } /** + * intel_vgpu_reset_resource - reset resource state owned by a vGPU + * @vgpu: a vGPU + * + * This function is used to reset resource state owned by a vGPU. + * + */ +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + intel_runtime_pm_get(dev_priv); + _clear_vgpu_fence(vgpu); + intel_runtime_pm_put(dev_priv); +} + +/** * intel_alloc_vgpu_resource - allocate HW resource for a vGPU * @vgpu: vGPU * @param: vGPU creation params diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 711c31c8d8b4..4a6a2ed65732 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, } return 0; } + +/** + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU + * + * @vgpu: a vGPU + * @primary: is the vGPU presented as primary + * + */ +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + u16 *gmch_ctl; + int i; + + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, + info->cfg_space_size); + + if (!primary) { + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = + INTEL_GVT_PCI_CLASS_VGA_OTHER; + } + + /* Show guest that there isn't any stolen memory.*/ + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); + + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, + gvt_aperture_pa_base(gvt), true); + + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO + | PCI_COMMAND_MEMORY + | PCI_COMMAND_MASTER); + /* + * Clear the bar upper 32bit and let guest to assign the new value + */ + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); + + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { + vgpu->cfg_space.bar[i].size = pci_resource_len( + gvt->dev_priv->drm.pdev, i * 2); + vgpu->cfg_space.bar[i].tracked = false; + } +} + +/** + * intel_vgpu_reset_cfg_space - reset vGPU configuration space + * + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) +{ + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; + bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != + INTEL_GVT_PCI_CLASS_VGA_OTHER; + + if (cmd & PCI_COMMAND_MEMORY) { + trap_gttmmio(vgpu, false); + map_aperture(vgpu, false); + } + + /** + * Currently we only do such reset when vGPU is not + * owned by any VM, so we simply restore entire cfg + * space to default value. 
+ */ + intel_vgpu_init_cfg_space(vgpu, primary); +} diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5b2ce2..47dec4acf7ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; - u64 pte; -#ifdef readq - pte = readq(addr); -#else - pte = ioread32(addr); - pte |= (u64)ioread32(addr + 4) << 32; -#endif - return pte; + return readq(addr); } static void write_pte64(struct drm_i915_private *dev_priv, @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, { void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; -#ifdef writeq writeq(pte, addr); -#else - iowrite32((u32)pte, addr); - iowrite32(pte >> 32, addr + 4); -#endif + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); POSTING_READ(GFX_FLSH_CNTL_GEN6); } @@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) info->gtt_entry_size; mem = kzalloc(mm->has_shadow_page_table ? mm->page_table_entry_size * 2 - : mm->page_table_entry_size, - GFP_ATOMIC); + : mm->page_table_entry_size, GFP_KERNEL); if (!mem) return -ENOMEM; mm->virtual_page_table = mem; @@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm; int ret; - mm = kzalloc(sizeof(*mm), GFP_ATOMIC); + mm = kzalloc(sizeof(*mm), GFP_KERNEL); if (!mm) { ret = -ENOMEM; goto fail; @@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; - struct page *scratch_pt; + void *scratch_pt; unsigned long mfn; int i; - void *p; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; - scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); + scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); if (!scratch_pt) { gvt_err("fail to allocate scratch page\n"); return -ENOMEM; } - p = kmap_atomic(scratch_pt); - mfn = intel_gvt_hypervisor_virt_to_mfn(p); + mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); - kunmap_atomic(p); - __free_page(scratch_pt); + gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); + free_page((unsigned long)scratch_pt); return -EFAULT; } gtt->scratch_pt[type].page_mfn = mfn; - gtt->scratch_pt[type].page = scratch_pt; + gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", vgpu->id, type, mfn); @@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * scratch_pt[type] indicate the scratch pt/scratch page used by the * 'type' pt. * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by - * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self + * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 
 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		se.val64 |= PPAT_CACHED_INDEX;
 
 		for (i = 0; i < page_entry_num; i++)
-			ops->set_entry(p, &se, i, false, 0, vgpu);
+			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
 	}
 
-	kunmap_atomic(p);
-
 	return 0;
 }
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
-	void *page_addr;
+	void *page;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
-	gvt->gtt.scratch_ggtt_page =
-		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-	if (!gvt->gtt.scratch_ggtt_page) {
+	page = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!page) {
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-	gvt->gtt.scratch_ggtt_mfn =
-		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
 	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
 		gvt_err("fail to translate scratch ggtt page\n");
 		__free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 	for (offset = 0; offset < num_entries; offset++)
 		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
+
+/**
+ * intel_vgpu_reset_gtt - reset all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+	int i;
+
+	ppgtt_free_all_shadow_page(vgpu);
+	if (!dmlr)
+		return;
+
+	intel_vgpu_reset_ggtt(vgpu);
+
+	/* clear scratch page for security */
+	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+		if (vgpu->gtt.scratch_pt[i].page != NULL)
+			memset(page_address(vgpu->gtt.scratch_pt[i].page),
+				0, PAGE_SIZE);
+	}
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b315ab3593ec..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
 	intel_gvt_clean_vgpu_types(gvt);
 
+	idr_destroy(&gvt->vgpu_idr);
+
 	kfree(dev_priv->gvt);
 	dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
 	gvt_dbg_core("init gvt device\n");
 
+	idr_init(&gvt->vgpu_idr);
+
 	mutex_init(&gvt->lock);
 	gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_setup_mmio_info(gvt);
 	if (ret)
-		return ret;
+		goto out_clean_idr;
 
 	ret =
intel_gvt_load_firmware(gvt); if (ret) @@ -313,6 +317,8 @@ out_free_firmware: intel_gvt_free_firmware(gvt); out_clean_mmio_info: intel_gvt_clean_mmio_info(gvt); +out_clean_idr: + idr_destroy(&gvt->vgpu_idr); kfree(gvt); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0af17016f33f..e227caf5859e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -323,6 +323,7 @@ struct intel_vgpu_creation_params { int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, struct intel_vgpu_creation_params *param); +void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); @@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); @@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, unsigned long *g_index); +void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, + bool primary); +void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); -int setup_vgpu_mmio(struct intel_vgpu *vgpu); void populate_pvinfo_page(struct intel_vgpu *vgpu); struct intel_gvt_ops { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 57fb8e3cbd1f..1d450627ff65 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, static int new_mmio_info(struct intel_gvt *gvt, u32 offset, u32 flags, u32 size, u32 addr_mask, u32 ro_mask, u32 device, - void *read, void *write) + int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), + int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) { struct intel_gvt_mmio_info *info, *p; u32 start, end, i; @@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, default: /*should not hit here*/ gvt_err("invalid forcewake offset 0x%x\n", offset); - return 1; + return -EINVAL; } } else { ack_reg_offset = FORCEWAKE_ACK_HSW_REG; @@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, return 0; } -static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes, unsigned long bitmap) -{ - struct intel_gvt_workload_scheduler *scheduler = - &vgpu->gvt->scheduler; - - vgpu->resetting = true; - - intel_vgpu_stop_schedule(vgpu); - /* - * The current_vgpu will set to NULL after stopping the - * scheduler when the reset is triggered by current vgpu. 
- */ - if (scheduler->current_vgpu == NULL) { - mutex_unlock(&vgpu->gvt->lock); - intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&vgpu->gvt->lock); - } - - intel_vgpu_reset_execlist(vgpu, bitmap); - - /* full GPU reset */ - if (bitmap == 0xff) { - mutex_unlock(&vgpu->gvt->lock); - intel_vgpu_clean_gtt(vgpu); - mutex_lock(&vgpu->gvt->lock); - setup_vgpu_mmio(vgpu); - populate_pvinfo_page(vgpu); - intel_vgpu_init_gtt(vgpu); - } - - vgpu->resetting = false; - - return 0; -} - static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) + void *p_data, unsigned int bytes) { + unsigned int engine_mask = 0; u32 data; - u64 bitmap = 0; write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); if (data & GEN6_GRDOM_FULL) { gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); - bitmap = 0xff; - } - if (data & GEN6_GRDOM_RENDER) { - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - bitmap |= (1 << RCS); - } - if (data & GEN6_GRDOM_MEDIA) { - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - bitmap |= (1 << VCS); - } - if (data & GEN6_GRDOM_BLT) { - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - bitmap |= (1 << BCS); - } - if (data & GEN6_GRDOM_VECS) { - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - bitmap |= (1 << VECS); - } - if (data & GEN8_GRDOM_MEDIA2) { - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - bitmap |= (1 << VCS2); + engine_mask = ALL_ENGINES; + } else { + if (data & GEN6_GRDOM_RENDER) { + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); + engine_mask |= (1 << RCS); + } + if (data & GEN6_GRDOM_MEDIA) { + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); + engine_mask |= (1 << VCS); + } + if (data & GEN6_GRDOM_BLT) { + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); + engine_mask |= (1 << BCS); + } + if (data & GEN6_GRDOM_VECS) { + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); + engine_mask |= (1 << VECS); + } + if (data & GEN8_GRDOM_MEDIA2) { + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); + if (HAS_BSD2(vgpu->gvt->dev_priv)) + engine_mask |= (1 << VCS2); + } } - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); + + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); + + return 0; } static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, @@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, +static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; @@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int rc = 0; unsigned int id = 0; write_vreg(vgpu, offset, p_data, bytes); @@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, id = VECS; break; default: - rc = -EINVAL; - break; + return -EINVAL; } set_bit(id, (void *)vgpu->tlb_handle_pending); - return rc; + return 0; } static int ring_reset_ctl_write(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07ae487..0c9234a87a20 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -398,6 +398,7 @@ static int 
intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) struct intel_vgpu_type *type; struct device *pdev; void *gvt; + int ret; pdev = mdev_parent_dev(mdev); gvt = kdev_to_i915(pdev)->gvt; @@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) if (!type) { gvt_err("failed to find type %s to create\n", kobject_name(kobj)); - return -EINVAL; + ret = -EINVAL; + goto out; } vgpu = intel_gvt_ops->vgpu_create(gvt, type); if (IS_ERR_OR_NULL(vgpu)) { - gvt_err("create intel vgpu failed\n"); - return -EINVAL; + ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); + gvt_err("failed to create intel vgpu: %d\n", ret); + goto out; } INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); @@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); - return 0; + ret = 0; + +out: + return ret; } static int intel_vgpu_remove(struct mdev_device *mdev) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a1946..4df078bc5d04 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) goto err; - mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); - if (!mmio && !vgpu->mmio.disable_warn_untrack) { - gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", - vgpu->id, offset, bytes, *(u32 *)p_data); - - if (offset == 0x206c) { - gvt_err("------------------------------------------\n"); - gvt_err("vgpu%d: likely triggers a gfx reset\n", - vgpu->id); - gvt_err("------------------------------------------\n"); - vgpu->mmio.disable_warn_untrack = true; - } - } - if (!intel_gvt_mmio_is_unalign(gvt, offset)) { if (WARN_ON(!IS_ALIGNED(offset, bytes))) goto err; } + mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); if (mmio) { if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, goto err; } ret = mmio->read(vgpu, offset, p_data, bytes); - } else + } else { ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); + if (!vgpu->mmio.disable_warn_untrack) { + gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", + vgpu->id, offset, bytes, *(u32 *)p_data); + + if (offset == 0x206c) { + gvt_err("------------------------------------------\n"); + gvt_err("vgpu%d: likely triggers a gfx reset\n", + vgpu->id); + gvt_err("------------------------------------------\n"); + vgpu->mmio.disable_warn_untrack = true; + } + } + } + if (ret) goto err; @@ -302,3 +303,56 @@ err: mutex_unlock(&gvt->lock); return ret; } + + +/** + * intel_vgpu_reset_mmio - reset virtual MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + const struct intel_gvt_device_info *info = &gvt->device_info; + + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); + + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; + + /* set the bit 0:2(Core C-State ) to C0 */ + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; +} + +/** + * intel_vgpu_init_mmio - init MMIO space + * @vgpu: a vGPU + * + * Returns: + * Zero on success, negative error code if failed + */ +int 
intel_vgpu_init_mmio(struct intel_vgpu *vgpu) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); + if (!vgpu->mmio.vreg) + return -ENOMEM; + + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + + intel_vgpu_reset_mmio(vgpu); + + return 0; +} + +/** + * intel_vgpu_clean_mmio - clean MMIO space + * @vgpu: a vGPU + * + */ +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) +{ + vfree(vgpu->mmio.vreg); + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 87d5b5e366a3..3bc620f56f35 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, *offset; \ }) +int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); +void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); + int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921770c6..d9fb41ab7119 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) vgpu->id)) return -EINVAL; - vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | - GFP_DMA32 | __GFP_ZERO, - INTEL_GVT_OPREGION_PORDER); + vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | + __GFP_ZERO, + get_order(INTEL_GVT_OPREGION_SIZE)); if (!vgpu_opregion(vgpu)->va) return -ENOMEM; @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { map_vgpu_opregion(vgpu, false); free_pages((unsigned long)vgpu_opregion(vgpu)->va, - INTEL_GVT_OPREGION_PORDER); + get_order(INTEL_GVT_OPREGION_SIZE)); vgpu_opregion(vgpu)->va = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02..fbd023a16f18 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -50,8 +50,7 @@ #define INTEL_GVT_OPREGION_PARM 0x204 #define INTEL_GVT_OPREGION_PAGES 2 -#define INTEL_GVT_OPREGION_PORDER 1 -#define INTEL_GVT_OPREGION_SIZE (2 * 4096) +#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index fd2b026f7ecd..7ea68a75dc46 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload; + struct intel_vgpu *vgpu; int event; mutex_lock(&gvt->lock); workload = scheduler->current_workload[ring_id]; + vgpu = workload->vgpu; - if (!workload->status && !workload->vgpu->resetting) { + if (!workload->status && !vgpu->resetting) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) for_each_set_bit(event, workload->pending_events, INTEL_GVT_EVENT_MAX) - intel_vgpu_trigger_virtual_event(workload->vgpu, - event); + 
intel_vgpu_trigger_virtual_event(vgpu, event); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id] = NULL; - atomic_dec(&workload->vgpu->running_workload_num); - list_del_init(&workload->list); workload->complete(workload); + atomic_dec(&vgpu->running_workload_num); wake_up(&scheduler->workload_complete_wq); mutex_unlock(&gvt->lock); } @@ -459,11 +459,11 @@ complete: gvt_dbg_sched("will complete workload %p\n, status: %d\n", workload, workload->status); - complete_current_workload(gvt, ring_id); - if (workload->req) i915_gem_request_put(fetch_and_zero(&workload->req)); + complete_current_workload(gvt, ring_id); + if (need_force_wake) intel_uncore_forcewake_put(gvt->dev_priv, FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d5777..7295bc8e12fb 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -35,79 +35,6 @@ #include "gvt.h" #include "i915_pvinfo.h" -static void clean_vgpu_mmio(struct intel_vgpu *vgpu) -{ - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; -} - -int setup_vgpu_mmio(struct intel_vgpu *vgpu) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - - if (vgpu->mmio.vreg) - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); - else { - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); - if (!vgpu->mmio.vreg) - return -ENOMEM; - } - - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; - - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); - - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; - - /* set the bit 0:2(Core C-State ) to C0 */ - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; - return 0; -} - -static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) -{ - struct intel_gvt *gvt = vgpu->gvt; - const struct intel_gvt_device_info *info = &gvt->device_info; - u16 *gmch_ctl; - int i; - - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, - info->cfg_space_size); - - if (!param->primary) { - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = - INTEL_GVT_PCI_CLASS_VGA_OTHER; - } - - /* Show guest that there isn't any stolen memory.*/ - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); - - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, - gvt_aperture_pa_base(gvt), true); - - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO - | PCI_COMMAND_MEMORY - | PCI_COMMAND_MASTER); - /* - * Clear the bar upper 32bit and let guest to assign the new value - */ - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); - - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { - vgpu->cfg_space.bar[i].size = pci_resource_len( - gvt->dev_priv->drm.pdev, i * 2); - vgpu->cfg_space.bar[i].tracked = false; - } -} - void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ @@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) if (low_avail / min_low == 0) break; gvt->types[i].low_gm_size = min_low; - gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; + 
gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; gvt->types[i].max_instance = low_avail / min_low; gvt->types[i].avail_instance = gvt->types[i].max_instance; @@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) */ low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - + high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; @@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); vfree(vgpu); intel_gvt_update_vgpu_types(gvt); @@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->gvt = gvt; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); - setup_vgpu_cfg_space(vgpu, param); + intel_vgpu_init_cfg_space(vgpu, param->primary); - ret = setup_vgpu_mmio(vgpu); + ret = intel_vgpu_init_mmio(vgpu); if (ret) - goto out_free_vgpu; + goto out_clean_idr; ret = intel_vgpu_alloc_resource(vgpu, param); if (ret) @@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: out_clean_vgpu_resource: intel_vgpu_free_resource(vgpu); out_clean_vgpu_mmio: - clean_vgpu_mmio(vgpu); + intel_vgpu_clean_mmio(vgpu); +out_clean_idr: + idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); mutex_unlock(&gvt->lock); @@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, } /** - * intel_gvt_reset_vgpu - reset a virtual GPU + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset + * @vgpu: virtual GPU + * @dmlr: vGPU Device Model Level Reset or GT Reset + * @engine_mask: engines to reset for GT reset + * + * This function is called when a user wants to reset a virtual GPU through + * device model reset or GT reset. The caller should hold the gvt lock. + * + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset + * the whole vGPU to default state as when it is created. This vGPU function + * is required both for functionality and security concerns. The ultimate goal + * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines. When we + * assign a vGPU to a virtual machine we must issue such a reset first. + * + * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines + * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU Spec. + * Unlike the FLR, GT reset only resets particular resources of a vGPU per + * the reset request. The guest driver can issue a GT reset by programming the + * virtual GDRST register to reset specific virtual GPU engines or all + * engines. + * + * The parameter dmlr identifies whether we will do a DMLR or a GT reset. + * The parameter engine_mask specifies the engines that need to be + * reset. If the value ALL_ENGINES is given for engine_mask, it means + * the caller requests a full GT reset and we will reset all virtual + * GPU engines. For FLR, engine_mask is ignored.
+ */ +void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, + unsigned int engine_mask) +{ + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + + gvt_dbg_core("------------------------------------------\n"); + gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", + vgpu->id, dmlr, engine_mask); + vgpu->resetting = true; + + intel_vgpu_stop_schedule(vgpu); + /* + * The current_vgpu will be set to NULL after stopping the + * scheduler when the reset is triggered by the current vgpu. + */ + if (scheduler->current_vgpu == NULL) { + mutex_unlock(&gvt->lock); + intel_gvt_wait_vgpu_idle(vgpu); + mutex_lock(&gvt->lock); + } + + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); + + /* full GPU reset or device model level reset */ + if (engine_mask == ALL_ENGINES || dmlr) { + intel_vgpu_reset_gtt(vgpu, dmlr); + intel_vgpu_reset_resource(vgpu); + intel_vgpu_reset_mmio(vgpu); + populate_pvinfo_page(vgpu); + + if (dmlr) + intel_vgpu_reset_cfg_space(vgpu); + } + + vgpu->resetting = false; + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); + gvt_dbg_core("------------------------------------------\n"); +} + +/** + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) * @vgpu: virtual GPU * * This function is called when a user wants to reset a virtual GPU. @@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->lock); + intel_gvt_reset_vgpu_locked(vgpu, true, 0); + mutex_unlock(&vgpu->gvt->lock); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 15f0fc48c442..bd5a38be5b83 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -49,6 +49,7 @@ #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ #include <drm/drm_gem.h> #include <drm/drm_auth.h> +#include <drm/drm_cache.h> #include "i915_params.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ac25706b7d4d..b44e9466d394 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11107,7 +11107,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, return PTR_ERR(plane_state); if (mode) - drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); + drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); else hdisplay = vdisplay = 0; @@ -12997,7 +12997,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, * computation to clearly distinguish it from the adjusted mode, which * can be changed by the connectors in the below retry loop.
*/ - drm_crtc_get_hv_timing(&pipe_config->base.mode, + drm_mode_get_hv_timing(&pipe_config->base.mode, &pipe_config->pipe_src_w, &pipe_config->pipe_src_h); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0fafbec6dacf..2ab0192595c2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1732,7 +1732,9 @@ found: * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry */ pipe_config->limited_color_range = - bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1; + bpp != 18 && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; } else { pipe_config->limited_color_range = intel_dp->limited_color_range; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 29a9af177734..95267fcef71b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -592,7 +592,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba /* create encoders */ intel_dp_create_fake_mst_encoders(intel_dig_port); - ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id); + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev, + &intel_dp->aux, 16, 3, conn_base_id); if (ret) { intel_dp->can_mst = false; return ret; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0bcfead14571..af16b0fa6b69 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -455,24 +455,23 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; union hdmi_infoframe frame; int ret; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - &crtc_state->base.adjusted_mode); + adjusted_mode); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); return; } - if (intel_hdmi->rgb_quant_range_selectable) { - if (crtc_state->limited_color_range) - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_LIMITED; - else - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_FULL; - } + drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode, + crtc_state->limited_color_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL, + intel_hdmi->rgb_quant_range_selectable); intel_write_infoframe(encoder, crtc_state, &frame); } @@ -1330,7 +1329,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, /* See CEA-861-E - 5.1 Default Encoding Parameters */ pipe_config->limited_color_range = pipe_config->has_hdmi_sink && - drm_match_cea_mode(adjusted_mode) > 1; + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; } else { pipe_config->limited_color_range = intel_hdmi->limited_color_range; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 3d546c019de0..b62e3f8ad415 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. 
*/ if (hpd_disabled) { - drm_kms_helper_poll_enable_locked(dev); + drm_kms_helper_poll_enable(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } @@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) } if (enabled) - drm_kms_helper_poll_enable_locked(dev); + drm_kms_helper_poll_enable(dev); mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 359cd2765552..f645275e6e63 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -207,8 +207,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct drm_encoder *encoder; struct imx_hdmi *hdmi; - struct resource *iores; - int irq; int ret; if (!pdev->dev.of_node) @@ -223,14 +221,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, hdmi->dev = &pdev->dev; encoder = &hdmi->encoder; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iores) - return -ENXIO; - encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); /* * If we failed to find the CRTC(s) which this encoder is @@ -249,7 +239,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + ret = dw_hdmi_bind(pdev, encoder, plat_data); /* * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), @@ -264,7 +254,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, static void dw_hdmi_imx_unbind(struct device *dev, struct device *master, void *data) { - return dw_hdmi_unbind(dev, master, data); + return dw_hdmi_unbind(dev); } static const struct component_ops dw_hdmi_imx_ops = { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 01a21dd835b5..a73de1e669c2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -170,8 +170,8 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe) { - struct mtk_drm_private *priv = drm->dev_private; - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]); + struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe); + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); @@ -181,8 +181,8 @@ int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe) void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe) { - struct mtk_drm_private *priv = drm->dev_private; - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]); + struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe); + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; mtk_ddp_comp_disable_vblank(ovl); @@ -588,7 +588,6 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, goto unprepare; drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); - priv->crtc[pipe] = &mtk_crtc->base; priv->num_pipes++; return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 
aa9389446785..df322a7a5fcb 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -32,7 +32,6 @@ struct mtk_drm_private { struct drm_device *drm; struct device *dma_dev; - struct drm_crtc *crtc[MAX_CRTC]; unsigned int num_pipes; struct device_node *mutex_node; diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 642b2fab42ff..a32d3b6e2e12 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c @@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane, struct drm_crtc_state *crtc_state; struct drm_rect clip = { 0, }; + if (!state->crtc) + return 0; + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index d836b2274531..f7c870172220 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c @@ -38,6 +38,11 @@ * - TV Panel encoding via ENCT */ +/* HHI Registers */ +#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ +#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */ + struct meson_cvbs_enci_mode meson_cvbs_enci_pal = { .mode_tag = MESON_VENC_MODE_CVBS_PAL, .hso_begin = 3, @@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv) void meson_venc_init(struct meson_drm *priv) { + /* Disable CVBS VDAC */ + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); + + /* Power Down Dacs */ + writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING)); + + /* Disable HDMI PHY */ + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); + + /* Disable HDMI */ + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + /* Disable all encoders */ writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c index c809c085fd78..a2bcc70a03ef 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c @@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder) /* Disable CVBS VDAC */ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); } static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index a1d8dd15b131..1ffdafea27e4 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -392,6 +392,24 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) drm_mga_private_t *dev_priv; int ret; + /* There are PCI versions of the G450. These cards have the + * same PCI ID as the AGP G450, but have an additional PCI-to-PCI + * bridge chip. We detect these cards, which are not currently + * supported by this driver, by looking at the device ID of the + * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the + * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the + * device. 
+ */ + if ((dev->pdev->device == 0x0525) && dev->pdev->bus->self + && (dev->pdev->bus->self->vendor == 0x3388) + && (dev->pdev->bus->self->device == 0x0021) + && dev->agp) { + /* FIXME: This should be quirked in the pci core, but oh well + * the hw probably stopped existing. */ + arch_phys_wc_del(dev->agp->agp_mtrr); + kfree(dev->agp); + dev->agp = NULL; + } dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL); if (!dev_priv) return -ENOMEM; @@ -698,7 +716,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev, static int mga_do_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t *dma_bs) { - const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev); + const int is_agp = (dma_bs->agp_mode != 0) && dev->agp; int err; drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 25b2a1a424e6..63ba0699d107 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -37,8 +37,6 @@ #include <drm/drm_pciids.h> -static int mga_driver_device_is_agp(struct drm_device *dev); - static struct pci_device_id pciidlist[] = { mga_PCI_IDS }; @@ -66,7 +64,6 @@ static struct drm_driver driver = { .lastclose = mga_driver_lastclose, .set_busid = drm_pci_set_busid, .dma_quiescent = mga_driver_dma_quiescent, - .device_is_agp = mga_driver_device_is_agp, .get_vblank_counter = mga_get_vblank_counter, .enable_vblank = mga_enable_vblank, .disable_vblank = mga_disable_vblank, @@ -107,37 +104,3 @@ module_exit(mga_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); - -/** - * Determine if the device really is AGP or not. - * - * In addition to the usual tests performed by \c drm_device_is_agp, this - * function detects PCI G450 cards that appear to the system exactly like - * AGP G450 cards. - * - * \param dev The device to be tested. - * - * \returns - * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. - */ -static int mga_driver_device_is_agp(struct drm_device *dev) -{ - const struct pci_dev *const pdev = dev->pdev; - - /* There are PCI versions of the G450. These cards have the - * same PCI ID as the AGP G450, but have an additional PCI-to-PCI - * bridge chip. We detect these cards, which are not currently - * supported by this driver, by looking at the device ID of the - * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the - * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the - * device. 
- */ - - if ((pdev->device == 0x0525) && pdev->bus->self - && (pdev->bus->self->vendor == 0x3388) - && (pdev->bus->self->device == 0x0021)) { - return 0; - } - - return 2; -} diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index b0b874264f9d..9ac007880328 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -36,6 +36,7 @@ static const struct pci_device_id pciidlist[] = { { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH }, { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER }, { PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 }, + { PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 }, {0,} }; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 0d6e998d63e6..c88b6ec88dd2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -180,6 +180,7 @@ enum mga_type { G200_WB, G200_EV, G200_EH, + G200_EH3, G200_ER, G200_EW3, }; diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 10535e3b75f2..77d1c4771786 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -106,6 +106,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev) clock = 2; break; case G200_EH: + case G200_EH3: case G200_ER: data = 2; clock = 1; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 067dfbc91b1c..3938120e5051 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -497,34 +497,70 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) bool pll_locked = false; m = n = p = 0; - vcomax = 800000; - vcomin = 400000; - pllreffreq = 33333; - delta = 0xffffffff; + if (mdev->type == G200_EH3) { + vcomax = 3000000; + vcomin = 1500000; + pllreffreq = 25000; - for (testp = 16; testp > 0; testp >>= 1) { - if (clock * testp > vcomax) - continue; - if (clock * testp < vcomin) - continue; + delta = 0xffffffff; - for (testm = 1; testm < 33; testm++) { - for (testn = 17; testn < 257; testn++) { - computed = (pllreffreq * testn) / - (testm * testp); + testp = 0; + + for (testm = 150; testm >= 6; testm--) { + if (clock * testm > vcomax) + continue; + if (clock * testm < vcomin) + continue; + for (testn = 120; testn >= 60; testn--) { + computed = (pllreffreq * testn) / testm; if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; - n = testn - 1; - m = (testm - 1); - p = testp - 1; + n = testn; + m = testm; + p = testp; + } + if (delta == 0) + break; + } + if (delta == 0) + break; + } + } else { + + vcomax = 800000; + vcomin = 400000; + pllreffreq = 33333; + + delta = 0xffffffff; + + for (testp = 16; testp > 0; testp >>= 1) { + if (clock * testp > vcomax) + continue; + if (clock * testp < vcomin) + continue; + + for (testm = 1; testm < 33; testm++) { + for (testn = 17; testn < 257; testn++) { + computed = (pllreffreq * testn) / + (testm * testp); + if (computed > clock) + tmpdelta = computed - clock; + else + tmpdelta = clock - computed; + if (tmpdelta < delta) { + delta = tmpdelta; + n = testn - 1; + m = (testm - 1); + p = testp - 1; + } + if ((clock * testp) >= 600000) + p |= 0x80; } - if ((clock * testp) >= 600000) - p |= 0x80; } } } @@ -674,6 +710,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock) 
return mga_g200ev_set_plls(mdev, clock); break; case G200_EH: + case G200_EH3: return mga_g200eh_set_plls(mdev, clock); break; case G200_ER: @@ -933,6 +970,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, option2 = 0x0000b000; break; case G200_EH: + case G200_EH3: dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS; option = 0x00000120; @@ -979,7 +1017,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH || - mdev->type == G200_EW3) && + mdev->type == G200_EW3 || + mdev->type == G200_EH3) && (i >= 0x44) && (i <= 0x4e)) continue; diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 5e20220ef4c6..657598bb1e6b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -236,8 +236,6 @@ struct ttm_bo_driver mgag200_bo_driver = { .verify_access = mgag200_bo_verify_access, .io_mem_reserve = &mgag200_ttm_io_mem_reserve, .io_mem_free = &mgag200_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int mgag200_mm_init(struct mga_device *mdev) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index d96b2b6898a3..7f78da695dff 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,6 +4,7 @@ config DRM_MSM depends on DRM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK + depends on MMU select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a18126150e11..686a580c711a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, void adreno_flush(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - uint32_t wptr = get_wptr(gpu->rb); + uint32_t wptr; + + /* + * Mask wptr value that we calculate to fit in the HW range. 
This is + * to account for the possibility that the last command fit exactly into + * the ringbuffer and rb->next hasn't wrapped to zero yet + */ + wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); /* ensure writes to ringbuffer have hit system memory: */ mb(); @@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_mmu *mmu; int ret; adreno_gpu->funcs = funcs; @@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->aspace->mmu; - if (mmu) { + if (gpu->aspace && gpu->aspace->mmu) { + struct msm_mmu *mmu = gpu->aspace->mmu; ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5f6cd8745dbc..c396d459a9d0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_plane *plane; - struct drm_plane_state *plane_state; - - for_each_plane_in_state(state, plane, plane_state, i) - mdp5_plane_complete_commit(plane, plane_state); if (mdp5_kms->smp) mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17b0cc101171..cdfc63d90c7b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -104,8 +104,6 @@ struct mdp5_plane_state { /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; - - bool pending : 1; }; #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) @@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 75247ea4335b..b9fb111d3428 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, drm_printf(p, "\tzpos=%u\n", pstate->zpos); drm_printf(p, "\talpha=%u\n", pstate->alpha); drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); - drm_printf(p, "\tpending=%u\n", pstate->pending); } static void mdp5_plane_reset(struct drm_plane *plane) @@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) if (mdp5_state && mdp5_state->base.fb) drm_framebuffer_reference(mdp5_state->base.fb); - mdp5_state->pending = false; - return &mdp5_state->base; } @@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); - /* We don't allow faster-than-vblank updates.. 
if we did add this - * some day, we would need to disallow in cases where hwpipe - * changes - */ - if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) - return -EBUSY; - max_width = config->hw->lm.max_width << 16; max_height = config->hw->lm.max_height << 16; @@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct drm_plane_state *state = plane->state; - struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); DBG("%s: update", plane->name); - mdp5_state->pending = true; - if (plane_enabled(state)) { int ret; @@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return pstate->hwpipe->flush_mask; } -/* called after vsync in thread context */ -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - - pstate->pending = false; -} - /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) { diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 5d68ab362d75..f8a587eac6b8 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -174,10 +174,8 @@ fail_unlock: fail: if (ret) { - if (fb) { - drm_framebuffer_unregister_private(fb); + if (fb) drm_framebuffer_remove(fb); - } } return ret; @@ -247,7 +245,6 @@ void msm_fbdev_free(struct drm_device *dev) /* this will free the backing object */ if (fbdev->fb) { msm_gem_put_vaddr(fbdev->bo); - drm_framebuffer_unregister_private(fbdev->fb); drm_framebuffer_remove(fbdev->fb); } diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8bc59c7e261..8098677a3916 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { + if (!priv->aspace[id]) + continue; msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id], msm_obj->sgt); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 166e84e4f0d4..489676568a10 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, pagefault_disable(); } - if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { + if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || + !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) { DRM_ERROR("invalid flags: %x\n", submit_bo.flags); ret = -EINVAL; goto out_unlock; @@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob { uint32_t i, last_offset = 0; uint32_t *ptr; - int ret; + int ret = 0; if (offset % 4) { DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); @@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); if (ret) - return -EFAULT; + goto out; if (submit_reloc.submit_offset % 4) { DRM_ERROR("non-aligned reloc offset: %u\n", submit_reloc.submit_offset); - return -EINVAL; + ret = -EINVAL; + goto out; } /* offset in dwords: */ @@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob if ((off >= (obj->base.size / 4)) || (off < last_offset)) { DRM_ERROR("invalid offset %u at reloc %u\n", off, i); - return -EINVAL; + ret = -EINVAL; + goto 
out; } ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); if (ret) - return ret; + goto out; if (valid) continue; @@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob last_offset = off; } +out: msm_gem_put_vaddr_locked(&obj->base); - return 0; + return ret; } static void submit_cleanup(struct msm_gem_submit *submit) diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index f326cf6a32e6..67b34e069abf 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) struct msm_ringbuffer *ring; int ret; - size = ALIGN(size, 4); /* size should be dword aligned */ + if (WARN_ON(!is_power_of_2(size))) + return ERR_PTR(-EINVAL); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 7bd4683216d0..4df4f6ed4886 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -199,7 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) if (!nvxx_device(device)->func->pci) getparam->value = 3; else - if (drm_pci_device_is_agp(dev)) + if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) getparam->value = 0; else if (!pci_is_pcie(dev->pdev)) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index dd07ca140d12..8a528ebe30f3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1194,7 +1194,8 @@ out: } static void -nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) +nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, + struct ttm_mem_reg *new_mem) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nvkm_vma *vma; @@ -1570,8 +1571,6 @@ struct ttm_bo_driver nouveau_bo_driver = { .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; struct nvkm_vma * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index add353e230f4..6b570079d185 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -58,27 +58,30 @@ int nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe) { struct drm_crtc *crtc; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - if (nv_crtc->index == pipe) { - nvif_notify_get(&nv_crtc->vblank); - return 0; - } - } - return -EINVAL; + struct nouveau_crtc *nv_crtc; + + crtc = drm_crtc_from_index(dev, pipe); + if (!crtc) + return -EINVAL; + + nv_crtc = nouveau_crtc(crtc); + nvif_notify_get(&nv_crtc->vblank); + + return 0; } void nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe) { struct drm_crtc *crtc; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - if (nv_crtc->index == pipe) { - nvif_notify_put(&nv_crtc->vblank); - return; - } - } + struct nouveau_crtc *nv_crtc; + + crtc = drm_crtc_from_index(dev, pipe); + if (!crtc) + return; + + nv_crtc = nouveau_crtc(crtc); + nvif_notify_put(&nv_crtc->vblank); } static inline int diff --git 
a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index cb85cb72dc1c..452da483ca01 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3417,7 +3417,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, mstm->outp = outp; mstm->mgr.cbs = &nv50_mstm; - ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev->dev, aux, aux_max, + ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max, max_payloads, conn_base_id); if (ret) return ret; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index dc026a843712..a2bb855a2851 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1253,7 +1253,7 @@ static int dsicm_probe(struct platform_device *pdev) dsicm_hw_reset(ddata); if (ddata->use_dsi_backlight) { - memset(&props, 0, sizeof(struct backlight_properties)); + memset(&props, 0, sizeof(props)); props.max_brightness = 255; props.type = BACKLIGHT_RAW; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 746cb8d9cba1..5ab39e0060f2 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -909,6 +909,7 @@ static struct spi_driver acx565akm_driver = { module_spi_driver(acx565akm_driver); +MODULE_ALIAS("spi:sony,acx565akm"); MODULE_AUTHOR("Nokia Corporation"); MODULE_DESCRIPTION("acx565akm LCD Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index c839f6456db2..5554b72cf56a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -620,6 +620,19 @@ u32 dispc_wb_get_framedone_irq(void) return DISPC_IRQ_FRAMEDONEWB; } +void dispc_mgr_enable(enum omap_channel channel, bool enable) +{ + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); + /* flush posted write */ + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); +} +EXPORT_SYMBOL(dispc_mgr_enable); + +static bool dispc_mgr_is_enabled(enum omap_channel channel) +{ + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); +} + bool dispc_mgr_go_busy(enum omap_channel channel) { return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; @@ -2901,20 +2914,6 @@ enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channe } EXPORT_SYMBOL(dispc_mgr_get_supported_outputs); -void dispc_mgr_enable(enum omap_channel channel, bool enable) -{ - mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); - /* flush posted write */ - mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); -} -EXPORT_SYMBOL(dispc_mgr_enable); - -bool dispc_mgr_is_enabled(enum omap_channel channel) -{ - return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); -} -EXPORT_SYMBOL(dispc_mgr_is_enabled); - void dispc_wb_enable(bool enable) { dispc_ovl_enable(OMAP_DSS_WB, enable); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index f060bda31235..f74615d005a8 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4336,7 +4336,7 @@ static void print_dsi_vm(const char *str, wc = DIV_ROUND_UP(t->hact * t->bitspp, 8); pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */ - bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch; + bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp; tot = bl + pps; #define TO_DSI_T(x) ((u32)div64_u64((u64)x * 
1000000000llu, byteclk)) @@ -4345,14 +4345,14 @@ static void print_dsi_vm(const char *str, "%u/%u/%u/%u/%u/%u = %u + %u = %u\n", str, byteclk, - t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch, + t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp, bl, pps, tot, TO_DSI_T(t->hss), TO_DSI_T(t->hsa), TO_DSI_T(t->hse), TO_DSI_T(t->hbp), TO_DSI_T(pps), - TO_DSI_T(t->hfront_porch), + TO_DSI_T(t->hfp), TO_DSI_T(bl), TO_DSI_T(pps), @@ -4367,7 +4367,7 @@ static void print_dispc_vm(const char *str, const struct videomode *vm) int hact, bl, tot; hact = vm->hactive; - bl = vm->hsync_len + vm->hbp + vm->hfront_porch; + bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch; tot = hact + bl; #define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck)) @@ -4376,10 +4376,10 @@ static void print_dispc_vm(const char *str, const struct videomode *vm) "%u/%u/%u/%u = %u + %u = %u\n", str, pck, - vm->hsync_len, vm->hbp, hact, vm->hfront_porch, + vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch, bl, hact, tot, TO_DISPC_T(vm->hsync_len), - TO_DISPC_T(vm->hbp), + TO_DISPC_T(vm->hback_porch), TO_DISPC_T(hact), TO_DISPC_T(vm->hfront_porch), TO_DISPC_T(bl), @@ -4401,12 +4401,12 @@ static void print_dsi_dispc_vm(const char *str, dsi_tput = (u64)byteclk * t->ndl * 8; pck = (u32)div64_u64(dsi_tput, t->bitspp); dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl); - dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch; + dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp; vm.pixelclock = pck; vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk); - vm.hbp = div64_u64((u64)t->hbp * pck, byteclk); - vm.hfront_porch = div64_u64((u64)t->hfront_porch * pck, byteclk); + vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk); + vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk); vm.hactive = t->hact; print_dispc_vm(str, &vm); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 136d30484d02..bf626acae271 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -119,8 +119,7 @@ static void __init omapdss_omapify_node(struct device_node *node) static void __init omapdss_add_to_list(struct device_node *node, bool root) { - struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), - GFP_KERNEL); + struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL); if (n) { n->node = node; n->root = root; diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index b420dde8c0fb..5b3b961127bd 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -856,7 +856,6 @@ int dispc_runtime_get(void); void dispc_runtime_put(void); void dispc_mgr_enable(enum omap_channel channel, bool enable); -bool dispc_mgr_is_enabled(enum omap_channel channel); u32 dispc_mgr_get_vsync_irq(enum omap_channel channel); u32 dispc_mgr_get_framedone_irq(enum omap_channel channel); u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 2580e8673908..f90e2d22c5ec 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -162,7 +162,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector, dssdrv->get_timings(dssdev, &t); - if (memcmp(&vm, &t, sizeof(struct videomode))) + if (memcmp(&vm, &t, sizeof(vm))) r = -EINVAL; else 
r = 0; @@ -217,7 +217,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, omap_dss_get_device(dssdev); - omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL); + omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; @@ -240,8 +240,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_connector_register(connector); - return connector; fail: diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 8dea89030e66..dd47dc191e6b 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -36,26 +36,18 @@ struct omap_crtc { struct videomode vm; - struct omap_drm_irq vblank_irq; - struct omap_drm_irq error_irq; - bool ignore_digit_sync_lost; + bool enabled; bool pending; wait_queue_head_t pending_wait; + struct drm_pending_vblank_event *event; }; /* ----------------------------------------------------------------------------- * Helper Functions */ -uint32_t pipe2vbl(struct drm_crtc *crtc) -{ - struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - - return dispc_mgr_get_vsync_irq(omap_crtc->channel); -} - struct videomode *omap_crtc_timings(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -68,6 +60,19 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc) return omap_crtc->channel; } +static bool omap_crtc_is_pending(struct drm_crtc *crtc) +{ + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + unsigned long flags; + bool pending; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + pending = omap_crtc->pending; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + return pending; +} + int omap_crtc_wait_pending(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -77,7 +82,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) * a single frame refresh even on slower displays. 
*/ return wait_event_timeout(omap_crtc->pending_wait, - !omap_crtc->pending, + !omap_crtc_is_pending(crtc), msecs_to_jiffies(250)); } @@ -135,14 +140,15 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) u32 framedone_irq, vsync_irq; int ret; + if (WARN_ON(omap_crtc->enabled == enable)) + return; + if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) { dispc_mgr_enable(channel, enable); + omap_crtc->enabled = enable; return; } - if (dispc_mgr_is_enabled(channel) == enable) - return; - if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) { /* * Digit output produces some sync lost interrupts during the @@ -173,6 +179,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) } dispc_mgr_enable(channel, enable); + omap_crtc->enabled = enable; ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); if (ret) { @@ -259,26 +266,9 @@ static const struct dss_mgr_ops mgr_ops = { * Setup, Flush and Page Flip */ -static void omap_crtc_complete_page_flip(struct drm_crtc *crtc) +void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus) { - struct drm_pending_vblank_event *event; - struct drm_device *dev = crtc->dev; - unsigned long flags; - - event = crtc->state->event; - - if (!event) - return; - - spin_lock_irqsave(&dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, event); - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) -{ - struct omap_crtc *omap_crtc = - container_of(irq, struct omap_crtc, error_irq); + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); if (omap_crtc->ignore_digit_sync_lost) { irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT; @@ -289,29 +279,38 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus); } -static void omap_crtc_vblank_irq(struct omap_drm_irq *irq, uint32_t irqstatus) +void omap_crtc_vblank_irq(struct drm_crtc *crtc) { - struct omap_crtc *omap_crtc = - container_of(irq, struct omap_crtc, vblank_irq); - struct drm_device *dev = omap_crtc->base.dev; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + bool pending; - if (dispc_mgr_go_busy(omap_crtc->channel)) + spin_lock(&crtc->dev->event_lock); + /* + * If the dispc is busy we're racing the flush operation. Try again on + * the next vblank interrupt. + */ + if (dispc_mgr_go_busy(omap_crtc->channel)) { + spin_unlock(&crtc->dev->event_lock); return; + } - DBG("%s: apply done", omap_crtc->name); - - __omap_irq_unregister(dev, &omap_crtc->vblank_irq); + /* Send the vblank event if one has been requested. */ + if (omap_crtc->event) { + drm_crtc_send_vblank_event(crtc, omap_crtc->event); + omap_crtc->event = NULL; + } - rmb(); - WARN_ON(!omap_crtc->pending); + pending = omap_crtc->pending; omap_crtc->pending = false; - wmb(); + spin_unlock(&crtc->dev->event_lock); - /* wake up userspace */ - omap_crtc_complete_page_flip(&omap_crtc->base); + if (pending) + drm_crtc_vblank_put(crtc); - /* wake up omap_atomic_complete */ + /* Wake up omap_atomic_complete. 
*/ wake_up(&omap_crtc->pending_wait); + + DBG("%s: apply done", omap_crtc->name); } /* ----------------------------------------------------------------------------- @@ -324,9 +323,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc) DBG("%s", omap_crtc->name); - WARN_ON(omap_crtc->vblank_irq.registered); - omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); - drm_crtc_cleanup(crtc); kfree(omap_crtc); @@ -335,17 +331,18 @@ static void omap_crtc_destroy(struct drm_crtc *crtc) static void omap_crtc_enable(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + int ret; DBG("%s", omap_crtc->name); - rmb(); + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_vblank_on(crtc); + ret = drm_crtc_vblank_get(crtc); + WARN_ON(ret != 0); + WARN_ON(omap_crtc->pending); omap_crtc->pending = true; - wmb(); - - omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); - - drm_crtc_vblank_on(crtc); + spin_unlock_irq(&crtc->dev->event_lock); } static void omap_crtc_disable(struct drm_crtc *crtc) @@ -390,16 +387,15 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc, } static void omap_crtc_atomic_begin(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_crtc_state *old_crtc_state) { } static void omap_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) + struct drm_crtc_state *old_crtc_state) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - - WARN_ON(omap_crtc->vblank_irq.registered); + int ret; if (crtc->state->color_mgmt_changed) { struct drm_color_lut *lut = NULL; @@ -414,18 +410,30 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, dispc_mgr_set_gamma(omap_crtc->channel, lut, length); } - if (dispc_mgr_is_enabled(omap_crtc->channel)) { + /* + * Only flush the CRTC if it is currently enabled. CRTCs that require a + * mode set are disabled prior to plane updates and enabled afterwards. + * They are thus not active (regardless of what their CRTC core state + * reports) and the DRM core could thus call this function even though + * the CRTC is currently disabled. Do nothing in that case.
+ */ + if (!omap_crtc->enabled) + return; + + DBG("%s: GO", omap_crtc->name); - DBG("%s: GO", omap_crtc->name); + ret = drm_crtc_vblank_get(crtc); + WARN_ON(ret != 0); - rmb(); - WARN_ON(omap_crtc->pending); - omap_crtc->pending = true; - wmb(); + spin_lock_irq(&crtc->dev->event_lock); + dispc_mgr_go(omap_crtc->channel); - dispc_mgr_go(omap_crtc->channel); - omap_irq_register(crtc->dev, &omap_crtc->vblank_irq); - } + WARN_ON(omap_crtc->pending); + omap_crtc->pending = true; + + if (crtc->state->event) + omap_crtc->event = crtc->state->event; + spin_unlock_irq(&crtc->dev->event_lock); } static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc, @@ -546,14 +554,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, omap_crtc->channel = channel; omap_crtc->name = channel_names[channel]; - omap_crtc->vblank_irq.irqmask = pipe2vbl(crtc); - omap_crtc->vblank_irq.irq = omap_crtc_vblank_irq; - - omap_crtc->error_irq.irqmask = - dispc_mgr_get_sync_lost_irq(channel); - omap_crtc->error_irq.irq = omap_crtc_error_irq; - omap_irq_register(dev, &omap_crtc->error_irq); - ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, &omap_crtc_funcs, NULL); if (ret < 0) { diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index bf65862daf62..19b716745623 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -123,13 +123,4 @@ int omap_debugfs_init(struct drm_minor *minor) return ret; } -void omap_debugfs_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(omap_debugfs_list, - ARRAY_SIZE(omap_debugfs_list), minor); - if (dmm_is_available()) - drm_debugfs_remove_files(omap_dmm_debugfs_list, - ARRAY_SIZE(omap_dmm_debugfs_list), minor); -} - #endif diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index 4ceed7a9762f..3cab06661a08 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -224,7 +224,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, int rows = (1 + area->y1 - area->y0); int i = columns*rows; - pat = alloc_dma(txn, sizeof(struct pat), &pat_pa); + pat = alloc_dma(txn, sizeof(*pat), &pat_pa); if (txn->last_pat) txn->last_pat->next_pa = (uint32_t)pat_pa; @@ -735,7 +735,7 @@ static int omap_dmm_probe(struct platform_device *dev) /* alloc engines */ omap_dmm->engines = kcalloc(omap_dmm->num_engines, - sizeof(struct refill_engine), GFP_KERNEL); + sizeof(*omap_dmm->engines), GFP_KERNEL); if (!omap_dmm->engines) { ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 4fd2e1799a88..afe8f05b927b 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -96,7 +96,8 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit) dispc_runtime_get(); drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, 0); + drm_atomic_helper_commit_planes(dev, old_state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(dev, old_state); omap_atomic_wait_for_completion(dev, old_state); @@ -315,8 +316,6 @@ static int omap_modeset_init(struct drm_device *dev) drm_mode_config_init(dev); - omap_drm_irq_install(dev); - ret = omap_modeset_init_properties(dev); if (ret < 0) return ret; @@ -489,12 +488,9 @@ static int omap_modeset_init(struct drm_device *dev) drm_mode_config_reset(dev); - return 0; -} + omap_drm_irq_install(dev); 
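The omap_crtc.c hunks above replace the driver's hand-rolled vblank IRQ registration (struct omap_drm_irq plus explicit rmb()/wmb() barriers) with per-CRTC state guarded by the DRM core's dev->event_lock, holding a drm_crtc_vblank_get()/put() reference for as long as an update is in flight. A condensed sketch of the resulting flush/vblank-handler pairing; my_crtc, hw_commit_go() and hw_update_busy() are hypothetical stand-ins for the omapdrm specifics, not names from the patch:

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    struct my_crtc {
        struct drm_crtc base;
        bool pending;                           /* update committed, not yet latched */
        struct drm_pending_vblank_event *event; /* page-flip event to deliver */
        wait_queue_head_t pending_wait;
    };
    #define to_my_crtc(c) container_of(c, struct my_crtc, base)

    static void hw_commit_go(struct my_crtc *mc);   /* e.g. set the GO bit */
    static bool hw_update_busy(struct my_crtc *mc); /* GO bit still pending? */

    static void my_crtc_atomic_flush(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
    {
        struct my_crtc *mc = to_my_crtc(crtc);
        int ret;

        /* Keep the vblank interrupt alive until the update latches. */
        ret = drm_crtc_vblank_get(crtc);
        WARN_ON(ret != 0);

        spin_lock_irq(&crtc->dev->event_lock);
        hw_commit_go(mc);
        mc->pending = true;             /* event_lock orders this; no barriers */
        mc->event = crtc->state->event;
        crtc->state->event = NULL;
        spin_unlock_irq(&crtc->dev->event_lock);
    }

    static void my_crtc_vblank_irq(struct drm_crtc *crtc)
    {
        struct my_crtc *mc = to_my_crtc(crtc);
        bool completed;

        spin_lock(&crtc->dev->event_lock);
        if (hw_update_busy(mc)) {       /* racing the flush, retry next vblank */
            spin_unlock(&crtc->dev->event_lock);
            return;
        }
        if (mc->event) {
            drm_crtc_send_vblank_event(crtc, mc->event);
            mc->event = NULL;
        }
        completed = mc->pending;
        mc->pending = false;
        spin_unlock(&crtc->dev->event_lock);

        if (completed)
            drm_crtc_vblank_put(crtc);  /* balances the get in atomic_flush */
        wake_up(&mc->pending_wait);
    }

The same lock is what lets omap_crtc_wait_pending() read the flag through the new omap_crtc_is_pending() helper instead of relying on implicit barriers in wait_event_timeout().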
-static void omap_modeset_free(struct drm_device *dev) -{ - drm_mode_config_cleanup(dev); + return 0; } /* @@ -632,93 +628,6 @@ static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = * drm driver funcs */ -/** - * load - setup chip and create an initial config - * @dev: DRM device - * @flags: startup flags - * - * The driver load routine has to do several things: - * - initialize the memory manager - * - allocate initial config memory - * - setup the DRM framebuffer with the allocated memory - */ -static int dev_load(struct drm_device *dev, unsigned long flags) -{ - struct omap_drm_platform_data *pdata = dev->dev->platform_data; - struct omap_drm_private *priv; - unsigned int i; - int ret; - - DBG("load: dev=%p", dev); - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->omaprev = pdata->omaprev; - - dev->dev_private = priv; - - priv->wq = alloc_ordered_workqueue("omapdrm", 0); - init_waitqueue_head(&priv->commit.wait); - spin_lock_init(&priv->commit.lock); - - spin_lock_init(&priv->list_lock); - INIT_LIST_HEAD(&priv->obj_list); - - omap_gem_init(dev); - - ret = omap_modeset_init(dev); - if (ret) { - dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret); - dev->dev_private = NULL; - kfree(priv); - return ret; - } - - /* Initialize vblank handling, start with all CRTCs disabled. */ - ret = drm_vblank_init(dev, priv->num_crtcs); - if (ret) - dev_warn(dev->dev, "could not init vblank\n"); - - for (i = 0; i < priv->num_crtcs; i++) - drm_crtc_vblank_off(priv->crtcs[i]); - - priv->fbdev = omap_fbdev_init(dev); - - /* store off drm_device for use in pm ops */ - dev_set_drvdata(dev->dev, dev); - - drm_kms_helper_poll_init(dev); - - return 0; -} - -static void dev_unload(struct drm_device *dev) -{ - struct omap_drm_private *priv = dev->dev_private; - - DBG("unload: dev=%p", dev); - - drm_kms_helper_poll_fini(dev); - - if (priv->fbdev) - omap_fbdev_free(dev); - - omap_modeset_free(dev); - omap_gem_deinit(dev); - - destroy_workqueue(priv->wq); - - drm_vblank_cleanup(dev); - omap_drm_irq_uninstall(dev); - - kfree(dev->dev_private); - dev->dev_private = NULL; - - dev_set_drvdata(dev->dev, NULL); -} - static int dev_open(struct drm_device *dev, struct drm_file *file) { file->driver_priv = NULL; @@ -803,8 +712,6 @@ static const struct file_operations omapdriver_fops = { static struct drm_driver omap_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = dev_load, - .unload = dev_unload, .open = dev_open, .lastclose = dev_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, @@ -812,7 +719,6 @@ static struct drm_driver omap_drm_driver = { .disable_vblank = omap_irq_disable_vblank, #ifdef CONFIG_DEBUG_FS .debugfs_init = omap_debugfs_init, - .debugfs_cleanup = omap_debugfs_cleanup, #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, @@ -834,30 +740,125 @@ static struct drm_driver omap_drm_driver = { .patchlevel = DRIVER_PATCHLEVEL, }; -static int pdev_probe(struct platform_device *device) +static int pdev_probe(struct platform_device *pdev) { - int r; + struct omap_drm_platform_data *pdata = pdev->dev.platform_data; + struct omap_drm_private *priv; + struct drm_device *ddev; + unsigned int i; + int ret; + + DBG("%s", pdev->name); if (omapdss_is_initialized() == false) return -EPROBE_DEFER; omap_crtc_pre_init(); - r = omap_connect_dssdevs(); - if (r) { - omap_crtc_pre_uninit(); - return r; + ret = omap_connect_dssdevs(); + if (ret) + 
goto err_crtc_uninit; + + /* Allocate and initialize the driver private structure. */ + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_disconnect_dssdevs; } - DBG("%s", device->name); - return drm_platform_init(&omap_drm_driver, device); + priv->omaprev = pdata->omaprev; + priv->wq = alloc_ordered_workqueue("omapdrm", 0); + + init_waitqueue_head(&priv->commit.wait); + spin_lock_init(&priv->commit.lock); + spin_lock_init(&priv->list_lock); + INIT_LIST_HEAD(&priv->obj_list); + + /* Allocate and initialize the DRM device. */ + ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev); + if (IS_ERR(ddev)) { + ret = PTR_ERR(ddev); + goto err_free_priv; + } + + ddev->dev_private = priv; + platform_set_drvdata(pdev, ddev); + + omap_gem_init(ddev); + + ret = omap_modeset_init(ddev); + if (ret) { + dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret); + goto err_free_drm_dev; + } + + /* Initialize vblank handling, start with all CRTCs disabled. */ + ret = drm_vblank_init(ddev, priv->num_crtcs); + if (ret) { + dev_err(&pdev->dev, "could not init vblank\n"); + goto err_cleanup_modeset; + } + + for (i = 0; i < priv->num_crtcs; i++) + drm_crtc_vblank_off(priv->crtcs[i]); + + priv->fbdev = omap_fbdev_init(ddev); + + drm_kms_helper_poll_init(ddev); + + /* + * Register the DRM device with the core and the connectors with + * sysfs. + */ + ret = drm_dev_register(ddev, 0); + if (ret) + goto err_cleanup_helpers; + + return 0; + +err_cleanup_helpers: + drm_kms_helper_poll_fini(ddev); + if (priv->fbdev) + omap_fbdev_free(ddev); +err_cleanup_modeset: + drm_mode_config_cleanup(ddev); + omap_drm_irq_uninstall(ddev); +err_free_drm_dev: + omap_gem_deinit(ddev); + drm_dev_unref(ddev); +err_free_priv: + destroy_workqueue(priv->wq); + kfree(priv); +err_disconnect_dssdevs: + omap_disconnect_dssdevs(); +err_crtc_uninit: + omap_crtc_pre_uninit(); + return ret; } -static int pdev_remove(struct platform_device *device) +static int pdev_remove(struct platform_device *pdev) { + struct drm_device *ddev = platform_get_drvdata(pdev); + struct omap_drm_private *priv = ddev->dev_private; + DBG(""); - drm_put_dev(platform_get_drvdata(device)); + drm_dev_unregister(ddev); + + drm_kms_helper_poll_fini(ddev); + + if (priv->fbdev) + omap_fbdev_free(ddev); + + drm_mode_config_cleanup(ddev); + + omap_drm_irq_uninstall(ddev); + omap_gem_deinit(ddev); + + drm_dev_unref(ddev); + + destroy_workqueue(priv->wq); + kfree(priv); omap_disconnect_dssdevs(); omap_crtc_pre_uninit(); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 7d9dd5400cef..36d93ce84a29 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -48,19 +48,6 @@ struct omap_drm_window { uint32_t src_w, src_h; }; -/* For transiently registering for different DSS irqs that various parts - * of the KMS code need during setup/configuration. These are not - * necessarily the same as what drm_vblank_get/put() are requesting, and - * the hysteresis in drm_vblank_put() is not necessarily desirable for - * internal housekeeping related irq usage.
- */ -struct omap_drm_irq { - struct list_head node; - uint32_t irqmask; - bool registered; - void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus); -}; - /* For KMS code that needs to wait for a certain # of IRQs: */ struct omap_irq_wait; @@ -101,9 +88,9 @@ struct omap_drm_private { struct drm_property *zorder_prop; /* irq handling: */ - struct list_head irq_list; /* list of omap_drm_irq */ - uint32_t vblank_mask; /* irq bits set for userspace vblank */ - struct omap_drm_irq error_handler; + spinlock_t wait_lock; /* protects the wait_list */ + struct list_head wait_list; /* list of omap_irq_wait */ + uint32_t irq_mask; /* enabled irqs in addition to wait_list */ /* atomic commit */ struct { @@ -116,7 +103,6 @@ struct omap_drm_private { #ifdef CONFIG_DEBUG_FS int omap_debugfs_init(struct drm_minor *minor); -void omap_debugfs_cleanup(struct drm_minor *minor); void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m); void omap_gem_describe_objects(struct list_head *list, struct seq_file *m); @@ -128,10 +114,6 @@ int omap_gem_resume(struct device *dev); int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe); void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe); -void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); -void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); -void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq); -void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq); void omap_drm_irq_uninstall(struct drm_device *dev); int omap_drm_irq_install(struct drm_device *dev); @@ -155,6 +137,8 @@ void omap_crtc_pre_uninit(void); struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct drm_plane *plane, enum omap_channel channel, int id); int omap_crtc_wait_pending(struct drm_crtc *crtc); +void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus); +void omap_crtc_vblank_irq(struct drm_crtc *crtc); struct drm_plane *omap_plane_init(struct drm_device *dev, int id, enum drm_plane_type type, @@ -233,32 +217,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, struct dma_buf *buffer); /* map crtc to vblank mask */ -uint32_t pipe2vbl(struct drm_crtc *crtc); struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); -/* should these be made into common util helpers? 
- */ - -static inline int objects_lookup( - struct drm_file *filp, uint32_t pixel_format, - struct drm_gem_object **bos, const uint32_t *handles) -{ - int i, n = drm_format_num_planes(pixel_format); - - for (i = 0; i < n; i++) { - bos[i] = drm_gem_object_lookup(filp, handles[i]); - if (!bos[i]) - goto fail; - - } - - return 0; - -fail: - while (--i > 0) - drm_gem_object_unreference_unlocked(bos[i]); - - return -ENOENT; -} - #endif /* __OMAP_DRV_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index a20f30039aee..86c977b7189a 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -117,7 +117,7 @@ static int omap_encoder_update(struct drm_encoder *encoder, dssdrv->get_timings(dssdev, &t); - if (memcmp(vm, &t, sizeof(struct videomode))) + if (memcmp(vm, &t, sizeof(*vm))) ret = -EINVAL; else ret = 0; diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index bd6b94c38613..29dc677dd4d3 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -29,37 +29,30 @@ * framebuffer funcs */ -/* per-format info: */ -struct format { +/* DSS to DRM formats mapping */ +static const struct { enum omap_color_mode dss_format; uint32_t pixel_format; - struct { - int stride_bpp; /* this times width is stride */ - int sub_y; /* sub-sample in y dimension */ - } planes[4]; - bool yuv; -}; - -static const struct format formats[] = { +} formats[] = { /* 16bpp [A]RGB: */ - { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */ - { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */ - { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */ - { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */ - { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */ - { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */ - { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */ + { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565 }, /* RGB16-565 */ + { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444 }, /* RGB12x-4444 */ + { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444 }, /* xRGB12-4444 */ + { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444 }, /* RGBA12-4444 */ + { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444 }, /* ARGB16-4444 */ + { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555 }, /* xRGB15-1555 */ + { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555 }, /* ARGB16-1555 */ /* 24bpp RGB: */ - { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */ + { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888 }, /* RGB24-888 */ /* 32bpp [A]RGB: */ - { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */ - { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */ - { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */ - { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */ + { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888 }, /* RGBx24-8888 */ + { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888 }, /* xRGB24-8888 */ + { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888 }, /* RGBA32-8888 */ + { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888 }, /* ARGB32-8888 */ /* YUV: */ - { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true }, - { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true }, - { 
OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true }, + { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12 }, + { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV }, + { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY }, }; /* convert from overlay's pixel formats bitmask to an array of fourcc's */ @@ -89,8 +82,9 @@ struct plane { struct omap_framebuffer { struct drm_framebuffer base; int pin_count; - const struct format *format; - struct plane planes[4]; + const struct drm_format_info *format; + enum omap_color_mode dss_format; + struct plane planes[2]; /* lock for pinning (pin_count and planes.paddr) */ struct mutex lock; }; @@ -128,13 +122,13 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { }; static uint32_t get_linear_addr(struct plane *plane, - const struct format *format, int n, int x, int y) + const struct drm_format_info *format, int n, int x, int y) { uint32_t offset; - offset = plane->offset + - (x * format->planes[n].stride_bpp) + - (y * plane->pitch / format->planes[n].sub_y); + offset = plane->offset + + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub)) + + (y * plane->pitch / (n == 0 ? 1 : format->vsub)); return plane->paddr + offset; } @@ -153,11 +147,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, struct omap_drm_window *win, struct omap_overlay_info *info) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - const struct format *format = omap_fb->format; + const struct drm_format_info *format = omap_fb->format; struct plane *plane = &omap_fb->planes[0]; uint32_t x, y, orient = 0; - info->color_mode = format->dss_format; + info->color_mode = omap_fb->dss_format; info->pos_x = win->crtc_x; info->pos_y = win->crtc_y; @@ -231,9 +225,9 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, } /* convert to pixels: */ - info->screen_width /= format->planes[0].stride_bpp; + info->screen_width /= format->cpp[0]; - if (format->dss_format == OMAP_DSS_COLOR_NV12) { + if (omap_fb->dss_format == OMAP_DSS_COLOR_NV12) { plane = &omap_fb->planes[1]; if (info->rotation_type == OMAP_DSS_ROT_TILER) { @@ -360,47 +354,58 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) { + unsigned int num_planes = drm_format_num_planes(mode_cmd->pixel_format); struct drm_gem_object *bos[4]; struct drm_framebuffer *fb; - int ret; + int i; - ret = objects_lookup(file, mode_cmd->pixel_format, - bos, mode_cmd->handles); - if (ret) - return ERR_PTR(ret); + for (i = 0; i < num_planes; i++) { + bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]); + if (!bos[i]) { + fb = ERR_PTR(-ENOENT); + goto error; + } + } fb = omap_framebuffer_init(dev, mode_cmd, bos); - if (IS_ERR(fb)) { - int i, n = drm_format_num_planes(mode_cmd->pixel_format); - for (i = 0; i < n; i++) - drm_gem_object_unreference_unlocked(bos[i]); - return fb; - } + if (IS_ERR(fb)) + goto error; + + return fb; + +error: + while (--i > 0) + drm_gem_object_unreference_unlocked(bos[i]); + return fb; } struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { + const struct drm_format_info *format = NULL; struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; - const struct format *format = NULL; - int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); + enum omap_color_mode dss_format = 0; + unsigned int pitch = mode_cmd->pitches[0]; + int 
ret, i; DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)", dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); + format = drm_format_info(mode_cmd->pixel_format); + for (i = 0; i < ARRAY_SIZE(formats); i++) { if (formats[i].pixel_format == mode_cmd->pixel_format) { - format = &formats[i]; + dss_format = formats[i].dss_format; break; } } - if (!format) { - dev_err(dev->dev, "unsupported pixel format: %4.4s\n", - (char *)&mode_cmd->pixel_format); + if (!format || !dss_format) { + dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", + (char *)&mode_cmd->pixel_format); ret = -EINVAL; goto fail; } @@ -413,40 +418,39 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, fb = &omap_fb->base; omap_fb->format = format; + omap_fb->dss_format = dss_format; mutex_init(&omap_fb->lock); - for (i = 0; i < n; i++) { - struct plane *plane = &omap_fb->planes[i]; - int size, pitch = mode_cmd->pitches[i]; - - if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) { - dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n", - pitch, mode_cmd->width * format->planes[i].stride_bpp); - ret = -EINVAL; - goto fail; - } + /* + * The code below assumes that no format uses more than two planes, and + * that the two planes of multiplane formats need the same number of + * bytes per pixel. + */ + if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { + dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); + ret = -EINVAL; + goto fail; + } - if (pitch % format->planes[i].stride_bpp != 0) { - dev_err(dev->dev, - "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n", - pitch, format->planes[i].stride_bpp); - ret = -EINVAL; - goto fail; - } + if (pitch % format->cpp[0]) { + dev_dbg(dev->dev, + "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", + pitch, format->cpp[0]); + ret = -EINVAL; + goto fail; + } - size = pitch * mode_cmd->height / format->planes[i].sub_y; + for (i = 0; i < format->num_planes; i++) { + struct plane *plane = &omap_fb->planes[i]; + unsigned int vsub = i == 0 ? 1 : format->vsub; + unsigned int size; - if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) { - dev_err(dev->dev, "provided buffer object is too small! %d < %d\n", - bos[i]->size - mode_cmd->offsets[i], size); - ret = -EINVAL; - goto fail; - } + size = pitch * mode_cmd->height / vsub; - if (i > 0 && pitch != mode_cmd->pitches[i - 1]) { - dev_err(dev->dev, - "pitches are not the same between framebuffer planes %d != %d\n", - pitch, mode_cmd->pitches[i - 1]); + if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) { + dev_dbg(dev->dev, + "provided buffer object is too small!
%d < %d\n", + bos[i]->size - mode_cmd->offsets[i], size); ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index aed99a0fc44b..2a839956dae6 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -225,10 +225,8 @@ fail: drm_fb_helper_release_fbi(helper); - if (fb) { - drm_framebuffer_unregister_private(fb); + if (fb) drm_framebuffer_remove(fb); - } } return ret; @@ -314,10 +312,8 @@ void omap_fbdev_free(struct drm_device *dev) omap_gem_put_paddr(fbdev->bo); /* this will free the backing object */ - if (fbdev->fb) { - drm_framebuffer_unregister_private(fbdev->fb); + if (fbdev->fb) drm_framebuffer_remove(fbdev->fb); - } kfree(fbdev); diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index 60e1e8016708..9adfa7c99695 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -19,25 +19,24 @@ #include "omap_drv.h" -static DEFINE_SPINLOCK(list_lock); - -static void omap_irq_error_handler(struct omap_drm_irq *irq, - uint32_t irqstatus) -{ - DRM_ERROR("errors: %08x\n", irqstatus); -} +struct omap_irq_wait { + struct list_head node; + wait_queue_head_t wq; + uint32_t irqmask; + int count; +}; -/* call with list_lock and dispc runtime held */ +/* call with wait_lock and dispc runtime held */ static void omap_irq_update(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - struct omap_drm_irq *irq; - uint32_t irqmask = priv->vblank_mask; + struct omap_irq_wait *wait; + uint32_t irqmask = priv->irq_mask; - assert_spin_locked(&list_lock); + assert_spin_locked(&priv->wait_lock); - list_for_each_entry(irq, &priv->irq_list, node) - irqmask |= irq->irqmask; + list_for_each_entry(wait, &priv->wait_list, node) + irqmask |= wait->irqmask; DBG("irqmask=%08x", irqmask); @@ -45,90 +44,48 @@ static void omap_irq_update(struct drm_device *dev) dispc_read_irqenable(); /* flush posted write */ } -void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) -{ - struct omap_drm_private *priv = dev->dev_private; - unsigned long flags; - - spin_lock_irqsave(&list_lock, flags); - - if (!WARN_ON(irq->registered)) { - irq->registered = true; - list_add(&irq->node, &priv->irq_list); - omap_irq_update(dev); - } - - spin_unlock_irqrestore(&list_lock, flags); -} - -void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) -{ - dispc_runtime_get(); - - __omap_irq_register(dev, irq); - - dispc_runtime_put(); -} - -void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) +static void omap_irq_wait_handler(struct omap_irq_wait *wait) { - unsigned long flags; - - spin_lock_irqsave(&list_lock, flags); - - if (!WARN_ON(!irq->registered)) { - irq->registered = false; - list_del(&irq->node); - omap_irq_update(dev); - } - - spin_unlock_irqrestore(&list_lock, flags); -} - -void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) -{ - dispc_runtime_get(); - - __omap_irq_unregister(dev, irq); - - dispc_runtime_put(); -} - -struct omap_irq_wait { - struct omap_drm_irq irq; - int count; -}; - -static DECLARE_WAIT_QUEUE_HEAD(wait_event); - -static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus) -{ - struct omap_irq_wait *wait = - container_of(irq, struct omap_irq_wait, irq); wait->count--; - wake_up_all(&wait_event); + wake_up(&wait->wq); } struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev, uint32_t irqmask, int count) { + struct omap_drm_private *priv 
= dev->dev_private; struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL); - wait->irq.irq = wait_irq; - wait->irq.irqmask = irqmask; + unsigned long flags; + + init_waitqueue_head(&wait->wq); + wait->irqmask = irqmask; wait->count = count; - omap_irq_register(dev, &wait->irq); + + spin_lock_irqsave(&priv->wait_lock, flags); + list_add(&wait->node, &priv->wait_list); + omap_irq_update(dev); + spin_unlock_irqrestore(&priv->wait_lock, flags); + return wait; } int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, unsigned long timeout) { - int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout); - omap_irq_unregister(dev, &wait->irq); + struct omap_drm_private *priv = dev->dev_private; + unsigned long flags; + int ret; + + ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout); + + spin_lock_irqsave(&priv->wait_lock, flags); + list_del(&wait->node); + omap_irq_update(dev); + spin_unlock_irqrestore(&priv->wait_lock, flags); + kfree(wait); - if (ret == 0) - return -1; - return 0; + + return ret == 0 ? -1 : 0; } /** @@ -152,10 +109,10 @@ int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe) DBG("dev=%p, crtc=%u", dev, pipe); - spin_lock_irqsave(&list_lock, flags); - priv->vblank_mask |= pipe2vbl(crtc); + spin_lock_irqsave(&priv->wait_lock, flags); + priv->irq_mask |= dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc)); omap_irq_update(dev); - spin_unlock_irqrestore(&list_lock, flags); + spin_unlock_irqrestore(&priv->wait_lock, flags); return 0; } @@ -177,17 +134,66 @@ void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe) DBG("dev=%p, crtc=%u", dev, pipe); - spin_lock_irqsave(&list_lock, flags); - priv->vblank_mask &= ~pipe2vbl(crtc); + spin_lock_irqsave(&priv->wait_lock, flags); + priv->irq_mask &= ~dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc)); omap_irq_update(dev); - spin_unlock_irqrestore(&list_lock, flags); + spin_unlock_irqrestore(&priv->wait_lock, flags); +} + +static void omap_irq_fifo_underflow(struct omap_drm_private *priv, + u32 irqstatus) +{ + static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + static const struct { + const char *name; + u32 mask; + } sources[] = { + { "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW }, + { "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW }, + { "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW }, + { "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW }, + }; + + const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW + | DISPC_IRQ_VID1_FIFO_UNDERFLOW + | DISPC_IRQ_VID2_FIFO_UNDERFLOW + | DISPC_IRQ_VID3_FIFO_UNDERFLOW; + unsigned int i; + + spin_lock(&priv->wait_lock); + irqstatus &= priv->irq_mask & mask; + spin_unlock(&priv->wait_lock); + + if (!irqstatus) + return; + + if (!__ratelimit(&_rs)) + return; + + DRM_ERROR("FIFO underflow on "); + + for (i = 0; i < ARRAY_SIZE(sources); ++i) { + if (sources[i].mask & irqstatus) + pr_cont("%s ", sources[i].name); + } + + pr_cont("(0x%08x)\n", irqstatus); +} + +static void omap_irq_ocp_error_handler(u32 irqstatus) +{ + if (!(irqstatus & DISPC_IRQ_OCP_ERR)) + return; + + DRM_ERROR("OCP error\n"); } static irqreturn_t omap_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; struct omap_drm_private *priv = dev->dev_private; - struct omap_drm_irq *handler, *n; + struct omap_irq_wait *wait, *n; unsigned long flags; unsigned int id; u32 irqstatus; @@ -200,24 +206,37 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) for (id = 0; id < priv->num_crtcs; id++) { struct drm_crtc *crtc = priv->crtcs[id]; + enum 
omap_channel channel = omap_crtc_channel(crtc); - if (irqstatus & pipe2vbl(crtc)) + if (irqstatus & dispc_mgr_get_vsync_irq(channel)) { drm_handle_vblank(dev, id); + omap_crtc_vblank_irq(crtc); + } + + if (irqstatus & dispc_mgr_get_sync_lost_irq(channel)) + omap_crtc_error_irq(crtc, irqstatus); } - spin_lock_irqsave(&list_lock, flags); - list_for_each_entry_safe(handler, n, &priv->irq_list, node) { - if (handler->irqmask & irqstatus) { - spin_unlock_irqrestore(&list_lock, flags); - handler->irq(handler, handler->irqmask & irqstatus); - spin_lock_irqsave(&list_lock, flags); - } + omap_irq_ocp_error_handler(irqstatus); + omap_irq_fifo_underflow(priv, irqstatus); + + spin_lock_irqsave(&priv->wait_lock, flags); + list_for_each_entry_safe(wait, n, &priv->wait_list, node) { + if (wait->irqmask & irqstatus) + omap_irq_wait_handler(wait); } - spin_unlock_irqrestore(&list_lock, flags); + spin_unlock_irqrestore(&priv->wait_lock, flags); return IRQ_HANDLED; } +static const u32 omap_underflow_irqs[] = { + [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, + [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, +}; + /* * We need a special version, instead of just using drm_irq_install(), * because we need to register the irq via omapdss. Once omapdss and @@ -228,10 +247,25 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) int omap_drm_irq_install(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - struct omap_drm_irq *error_handler = &priv->error_handler; + unsigned int num_mgrs = dss_feat_get_num_mgrs(); + unsigned int max_planes; + unsigned int i; int ret; - INIT_LIST_HEAD(&priv->irq_list); + spin_lock_init(&priv->wait_lock); + INIT_LIST_HEAD(&priv->wait_list); + + priv->irq_mask = DISPC_IRQ_OCP_ERR; + + max_planes = min(ARRAY_SIZE(priv->planes), + ARRAY_SIZE(omap_underflow_irqs)); + for (i = 0; i < max_planes; ++i) { + if (priv->planes[i]) + priv->irq_mask |= omap_underflow_irqs[i]; + } + + for (i = 0; i < num_mgrs; ++i) + priv->irq_mask |= dispc_mgr_get_sync_lost_irq(i); dispc_runtime_get(); dispc_clear_irqstatus(0xffffffff); @@ -241,16 +275,6 @@ int omap_drm_irq_install(struct drm_device *dev) if (ret < 0) return ret; - error_handler->irq = omap_irq_error_handler; - error_handler->irqmask = DISPC_IRQ_OCP_ERR; - - /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. 
really I think - * we just need to ignore it while enabling tv-out - */ - error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT; - - omap_irq_register(dev, error_handler); - dev->irq_enabled = true; return 0; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 82b2c23d6769..386d90af70f7 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -43,8 +43,6 @@ struct omap_plane { uint32_t nformats; uint32_t formats[32]; - - struct omap_drm_irq error_irq; }; struct omap_plane_state { @@ -204,8 +202,6 @@ static void omap_plane_destroy(struct drm_plane *plane) DBG("%s", omap_plane->name); - omap_irq_unregister(plane->dev, &omap_plane->error_irq); - drm_plane_cleanup(plane); kfree(omap_plane); @@ -332,14 +328,6 @@ static const struct drm_plane_funcs omap_plane_funcs = { .atomic_get_property = omap_plane_atomic_get_property, }; -static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus) -{ - struct omap_plane *omap_plane = - container_of(irq, struct omap_plane, error_irq); - DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name, - irqstatus); -} - static const char *plane_names[] = { [OMAP_DSS_GFX] = "gfx", [OMAP_DSS_VIDEO1] = "vid1", @@ -347,13 +335,6 @@ static const char *plane_names[] = { [OMAP_DSS_VIDEO3] = "vid3", }; -static const uint32_t error_irqs[] = { - [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW, - [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW, -}; - /* initialize plane */ struct drm_plane *omap_plane_init(struct drm_device *dev, int id, enum drm_plane_type type, @@ -377,10 +358,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, plane = &omap_plane->base; - omap_plane->error_irq.irqmask = error_irqs[id]; - omap_plane->error_irq.irq = omap_plane_error_irq; - omap_irq_register(dev, &omap_plane->error_irq); - ret = drm_universal_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs, omap_plane->formats, omap_plane->nformats, type, NULL); @@ -394,7 +371,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, return plane; error: - omap_irq_unregister(plane->dev, &omap_plane->error_irq); kfree(omap_plane); return NULL; } diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 241af9131dc8..057b2b547cac 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -84,8 +84,18 @@ int qxl_debugfs_init(struct drm_minor *minor) { #if defined(CONFIG_DEBUG_FS) + int r; + struct qxl_device *dev = + (struct qxl_device *) minor->dev->dev_private; + drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES, minor->debugfs_root, minor); + + r = qxl_ttm_debugfs_init(dev); + if (r) { + DRM_ERROR("Failed to init TTM debugfs\n"); + return r; + } #endif return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 659c77742649..416ade8566b7 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -1077,7 +1077,6 @@ static int qdev_output_init(struct drm_device *dev, int num_output) dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); - drm_connector_register(connector); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 460bbceae297..6e0f8a2d8ac9 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ 
b/drivers/gpu/drm/qxl/qxl_drv.c @@ -62,20 +62,83 @@ static struct pci_driver qxl_pci_driver; static int qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct drm_device *drm; + struct qxl_device *qdev; + int ret; + if (pdev->revision < 4) { DRM_ERROR("qxl too old, doesn't support client_monitors_config," " use xf86-video-qxl in user mode"); return -EINVAL; /* TODO: ENODEV ? */ } - return drm_get_pci_dev(pdev, ent, &qxl_driver); + + drm = drm_dev_alloc(&qxl_driver, &pdev->dev); + if (IS_ERR(drm)) + return -ENOMEM; + + qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); + if (!qdev) { + ret = -ENOMEM; + goto free_drm_device; + } + + ret = pci_enable_device(pdev); + if (ret) + goto free_drm_device; + + drm->pdev = pdev; + pci_set_drvdata(pdev, drm); + drm->dev_private = qdev; + + ret = qxl_device_init(qdev, drm, pdev, ent->driver_data); + if (ret) + goto disable_pci; + + ret = drm_vblank_init(drm, 1); + if (ret) + goto unload; + + ret = qxl_modeset_init(qdev); + if (ret) + goto vblank_cleanup; + + drm_kms_helper_poll_init(qdev->ddev); + + /* Complete initialization. */ + ret = drm_dev_register(drm, ent->driver_data); + if (ret) + goto modeset_cleanup; + + return 0; + +modeset_cleanup: + qxl_modeset_fini(qdev); +vblank_cleanup: + drm_vblank_cleanup(drm); +unload: + qxl_device_fini(qdev); +disable_pci: + pci_disable_device(pdev); +free_drm_device: + kfree(qdev); + kfree(drm); + return ret; } static void qxl_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + struct qxl_device *qdev = dev->dev_private; + + drm_dev_unregister(dev); + + qxl_modeset_fini(qdev); + qxl_device_fini(qdev); - drm_put_dev(dev); + dev->dev_private = NULL; + kfree(qdev); + drm_dev_unref(dev); } static const struct file_operations qxl_fops = { @@ -230,8 +293,6 @@ static struct pci_driver qxl_pci_driver = { static struct drm_driver qxl_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, - .load = qxl_driver_load, - .unload = qxl_driver_unload, .get_vblank_counter = qxl_noop_get_vblank_counter, .enable_vblank = qxl_noop_enable_vblank, .disable_vblank = qxl_noop_disable_vblank, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 883d8639c04e..0d877fa61162 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -336,8 +336,9 @@ __printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); extern const struct drm_ioctl_desc qxl_ioctls[]; extern int qxl_max_ioctl; -int qxl_driver_load(struct drm_device *dev, unsigned long flags); -void qxl_driver_unload(struct drm_device *dev); +int qxl_device_init(struct qxl_device *qdev, struct drm_device *ddev, + struct pci_dev *pdev, unsigned long flags); +void qxl_device_fini(struct qxl_device *qdev); int qxl_modeset_init(struct qxl_device *qdev); void qxl_modeset_fini(struct qxl_device *qdev); @@ -531,6 +532,7 @@ int qxl_garbage_collect(struct qxl_device *qdev); int qxl_debugfs_init(struct drm_minor *minor); void qxl_debugfs_takedown(struct drm_minor *minor); +int qxl_ttm_debugfs_init(struct qxl_device *qdev); /* qxl_prime.c */ int qxl_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index b2491407b616..d0666f5dccd6 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -115,7 +115,7 @@ static void qxl_gc_work(struct work_struct *work) qxl_garbage_collect(qdev); } -static int qxl_device_init(struct 
qxl_device *qdev, +int qxl_device_init(struct qxl_device *qdev, struct drm_device *ddev, struct pci_dev *pdev, unsigned long flags) @@ -263,7 +263,7 @@ static int qxl_device_init(struct qxl_device *qdev, return 0; } -static void qxl_device_fini(struct qxl_device *qdev) +void qxl_device_fini(struct qxl_device *qdev) { if (qdev->current_release_bo[0]) qxl_bo_unref(&qdev->current_release_bo[0]); @@ -284,55 +284,3 @@ static void qxl_device_fini(struct qxl_device *qdev) qdev->mode_info.num_modes = 0; qxl_debugfs_remove_files(qdev); } - -void qxl_driver_unload(struct drm_device *dev) -{ - struct qxl_device *qdev = dev->dev_private; - - if (qdev == NULL) - return; - - drm_vblank_cleanup(dev); - - qxl_modeset_fini(qdev); - qxl_device_fini(qdev); - - kfree(qdev); - dev->dev_private = NULL; -} - -int qxl_driver_load(struct drm_device *dev, unsigned long flags) -{ - struct qxl_device *qdev; - int r; - - qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); - if (qdev == NULL) - return -ENOMEM; - - dev->dev_private = qdev; - - r = qxl_device_init(qdev, dev, dev->pdev, flags); - if (r) - goto out; - - r = drm_vblank_init(dev, 1); - if (r) - goto unload; - - r = qxl_modeset_init(qdev); - if (r) - goto unload; - - drm_kms_helper_poll_init(qdev->ddev); - - return 0; -unload: - qxl_driver_unload(dev); - -out: - kfree(qdev); - return r; -} - - diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 1b096c5252ad..3dcc48431015 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -35,7 +35,6 @@ #include "qxl_object.h" #include <linux/delay.h> -static int qxl_ttm_debugfs_init(struct qxl_device *qdev); static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) { @@ -367,6 +366,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem) { struct qxl_bo *qbo; @@ -394,8 +394,6 @@ static struct ttm_bo_driver qxl_bo_driver = { .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, .move_notify = &qxl_bo_move_notify, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int qxl_ttm_init(struct qxl_device *qdev) @@ -436,11 +434,6 @@ int qxl_ttm_init(struct qxl_device *qdev) ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); DRM_INFO("qxl: %uM of Surface memory size\n", (unsigned)qdev->surfaceram_size / (1024 * 1024)); - r = qxl_ttm_debugfs_init(qdev); - if (r) { - DRM_ERROR("Failed to init debugfs\n"); - return r; - } return 0; } @@ -473,7 +466,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) } #endif -static int qxl_ttm_debugfs_init(struct qxl_device *qdev) +int qxl_ttm_debugfs_init(struct qxl_device *qdev) { #if defined(CONFIG_DEBUG_FS) static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index c829cfb02fc4..00cfb5d2875f 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -596,52 +596,58 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) #ifdef CONFIG_ACPI static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) { - bool ret = false; struct acpi_table_header *hdr; acpi_size tbl_size; UEFI_ACPI_VFCT *vfct; - GOP_VBIOS_CONTENT *vbios; - VFCT_IMAGE_HEADER *vhdr; + unsigned offset; if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr))) return false; tbl_size = hdr->length; if (tbl_size < 
sizeof(UEFI_ACPI_VFCT)) { DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); - goto out_unmap; + return false; } vfct = (UEFI_ACPI_VFCT *)hdr; - if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { - DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); - goto out_unmap; - } + offset = vfct->VBIOSImageOffset; - vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); - vhdr = &vbios->VbiosHeader; - DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", - vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, - vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); - - if (vhdr->PCIBus != rdev->pdev->bus->number || - vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || - vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || - vhdr->VendorID != rdev->pdev->vendor || - vhdr->DeviceID != rdev->pdev->device) { - DRM_INFO("ACPI VFCT table is not for this card\n"); - goto out_unmap; - } + while (offset < tbl_size) { + GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset); + VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader; - if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { - DRM_ERROR("ACPI VFCT image truncated\n"); - goto out_unmap; - } + offset += sizeof(VFCT_IMAGE_HEADER); + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image header truncated\n"); + return false; + } - rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); - ret = !!rdev->bios; + offset += vhdr->ImageLength; + if (offset > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + return false; + } + + if (vhdr->ImageLength && + vhdr->PCIBus == rdev->pdev->bus->number && + vhdr->PCIDevice == PCI_SLOT(rdev->pdev->devfn) && + vhdr->PCIFunction == PCI_FUNC(rdev->pdev->devfn) && + vhdr->VendorID == rdev->pdev->vendor && + vhdr->DeviceID == rdev->pdev->device) { + rdev->bios = kmemdup(&vbios->VbiosContent, + vhdr->ImageLength, + GFP_KERNEL); + + if (!rdev->bios) { + kfree(rdev->bios); + return false; + } + return true; + } + } -out_unmap: - return ret; + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + return false; } #else static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 510ea371dacc..a8442f7196d6 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -121,7 +121,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) VRAM, also but everything into VRAM on AGP cards and older IGP chips to avoid image corruptions */ if (p->ring == R600_RING_TYPE_UVD_INDEX && - (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) || + (i == 0 || pci_find_capability(p->rdev->ddev->pdev, + PCI_CAP_ID_AGP) || p->rdev->family == CHIP_RS780 || p->rdev->family == CHIP_RS880)) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8a1df2a1afbd..4b0c388be3f5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1549,8 +1549,6 @@ failed: return r; } -static void radeon_debugfs_remove_files(struct radeon_device *rdev); - /** * radeon_device_fini - tear down the driver * @@ -1577,7 +1575,6 @@ void radeon_device_fini(struct radeon_device *rdev) rdev->rmmio = NULL; if (rdev->family >= CHIP_BONAIRE) radeon_doorbell_fini(rdev); - radeon_debugfs_remove_files(rdev); } @@ -1954,16 +1951,3 @@ int radeon_debugfs_add_files(struct radeon_device *rdev, #endif return 0; } - 
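The rewritten radeon_acpi_vfct_bios() above is the interesting part of these radeon hunks: rather than trusting a single VBIOSImageOffset, it now walks every image in the ACPI VFCT table and bounds-checks both the header and the payload length it declares before dereferencing anything, so a truncated or corrupt table fails cleanly. The same walk-a-packed-table idiom in isolation, with a hypothetical img_hdr standing in for the VFCT structures:

    #include <linux/types.h>

    struct img_hdr {
        u16 id;
        u32 len;    /* payload bytes that follow this header */
    } __packed;

    /* Return the first image whose id matches, or NULL if none fits. */
    static const struct img_hdr *find_image(const u8 *tbl, size_t tbl_size,
                                            size_t start, u16 wanted)
    {
        size_t offset = start;

        while (offset < tbl_size) {
            const struct img_hdr *hdr = (const void *)(tbl + offset);

            /* The header itself must fit before hdr->len is read... */
            if (tbl_size - offset < sizeof(*hdr))
                return NULL;
            offset += sizeof(*hdr);

            /* ...and so must the payload the header declares. */
            if (tbl_size - offset < hdr->len)
                return NULL;
            offset += hdr->len;

            if (hdr->len && hdr->id == wanted)
                return hdr;
        }

        return NULL;
    }

Writing the checks as subtractions (tbl_size - offset < ...) sidesteps integer overflow when length fields come from untrusted firmware tables; the kernel hunk uses an add-then-compare form to the same effect.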
-static void radeon_debugfs_remove_files(struct radeon_device *rdev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < rdev->debugfs_count; i++) { - drm_debugfs_remove_files(rdev->debugfs[i].files, - rdev->debugfs[i].num_files, - rdev->ddev->primary); - } -#endif -} diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 6d1237d6e1b8..7d5ada3980dc 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -667,7 +667,7 @@ radeon_dp_mst_init(struct radeon_connector *radeon_connector) return 0; radeon_connector->mst_mgr.cbs = &mst_cbs; - return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev->dev, + return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev, &radeon_connector->ddc_bus->aux, 16, 6, radeon_connector->base.base.id); } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 116cf0d23595..56f35c06742c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -105,7 +105,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)rdev; /* update BUS flag */ - if (drm_pci_device_is_agp(dev)) { + if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) { flags |= RADEON_IS_AGP; } else if (pci_is_pcie(dev->pdev)) { flags |= RADEON_IS_PCIE; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 41b72ce6613f..74b276060c20 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -765,6 +765,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, } void radeon_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem) { struct radeon_bo *rbo; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index a10bb3deee54..9ffd8215d38a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -150,6 +150,7 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop); extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *new_mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1888144d0fed..7a10b3852970 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -502,6 +502,8 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ mem->bus.addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); + if (!mem->bus.addr) + return -ENOMEM; /* * Alpha: Use just the bus offset plus @@ -871,8 +873,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, .io_mem_free = &radeon_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int radeon_ttm_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ad4d7b8b8322..414776811e71 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin"); 
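The si.c microcode hunks below tighten the PCI device/revision matching that selects SMC firmware and add two board-specific images, banks_k_2_smc.bin and si58_mc.bin. They all rest on one idiom: build a firmware file name from the detected quirks, try request_firmware(), and fall back to an older naming scheme when the preferred image is absent. A hedged, self-contained version of that idiom; the quirk flag and the exact fallback policy are illustrative, not the driver's real logic:

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/kernel.h>

    static int request_mc_firmware(const struct firmware **fw,
                                   struct device *dev, const char *chip,
                                   bool special_board)
    {
        char name[32];
        int err;

        /* Boards with a quirky memory config need a dedicated image. */
        if (special_board)
            snprintf(name, sizeof(name), "radeon/si58_mc.bin");
        else
            snprintf(name, sizeof(name), "radeon/%s_mc.bin", chip);

        err = request_firmware(fw, name, dev);
        if (err && !special_board) {
            /* Fall back to the legacy firmware name. */
            snprintf(name, sizeof(name), "radeon/%s_mc2.bin", chip);
            err = request_firmware(fw, name, dev);
        }

        return err;
    }

request_firmware() takes a reference the caller must eventually drop with release_firmware(), which is why the driver keeps the struct firmware pointers around for its whole lifetime.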
MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); MODULE_FIRMWARE("radeon/tahiti_smc.bin"); -MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); @@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin"); MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); + +MODULE_FIRMWARE("radeon/si58_mc.bin"); static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); @@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev) int err; int new_fw = 0; bool new_smc = false; + bool si58_fw = false; + bool banks2_fw = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; - /* XXX: figure out which Tahitis need the new ucode */ - if (0) - new_smc = true; new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->device == 0x6810) || - (rdev->pdev->device == 0x6811) || - (rdev->pdev->device == 0x6816) || - (rdev->pdev->device == 0x6817) || - (rdev->pdev->device == 0x6806)) + if ((rdev->pdev->revision == 0x81) && + ((rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811))) new_smc = true; new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_VERDE: chip_name = "VERDE"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6820) || - (rdev->pdev->device == 0x6821) || - (rdev->pdev->device == 0x6822) || - (rdev->pdev->device == 0x6823) || - (rdev->pdev->device == 0x682A) || - (rdev->pdev->device == 0x682B)) + if (((rdev->pdev->device == 0x6820) && + ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83))) || + ((rdev->pdev->device == 0x6821) && + ((rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87))) || + ((rdev->pdev->revision == 0x87) && + ((rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682b)))) new_smc = true; new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_OLAND: chip_name = "OLAND"; - if ((rdev->pdev->revision == 0xC7) || - (rdev->pdev->revision == 0x80) || - (rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6604) || - (rdev->pdev->device == 0x6605)) + if (((rdev->pdev->revision == 0x81) && + ((rdev->pdev->device == 0x6600) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605) || + (rdev->pdev->device == 0x6610))) || + ((rdev->pdev->revision == 0x83) && + (rdev->pdev->device == 0x6610))) new_smc = true; new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; @@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_HAINAN: chip_name = "HAINAN"; - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0xC3) || - (rdev->pdev->device == 0x6664) || - (rdev->pdev->device == 
0x6665) || - (rdev->pdev->device == 0x6667)) + if (((rdev->pdev->revision == 0x81) && + (rdev->pdev->device == 0x6660)) || + ((rdev->pdev->revision == 0x83) && + ((rdev->pdev->device == 0x6660) || + (rdev->pdev->device == 0x6663) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)))) new_smc = true; + else if ((rdev->pdev->revision == 0xc3) && + (rdev->pdev->device == 0x6665)) + banks2_fw = true; new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } + /* this memory configuration requires special firmware */ + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + si58_fw = true; + DRM_INFO("Loading %s Microcode\n", new_chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); @@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + if (si58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); @@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev) } } - if (new_smc) + if (banks2_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); + else if (new_smc) snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); else snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8b5e697f2549..d12b8978142f 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2912,29 +2912,6 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) return ret; } -struct si_dpm_quirk { - u32 chip_vendor; - u32 chip_device; - u32 subsys_vendor; - u32 subsys_device; - u32 max_sclk; - u32 max_mclk; -}; - -/* cards with dpm stability problems */ -static struct si_dpm_quirk si_dpm_quirk_list[] = { - /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ - { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, - { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 }, - { 0, 0, 0, 0 }, -}; - static u16 si_get_lower_of_leakage_and_vce_voltage(struct radeon_device *rdev, u16 vce_voltage) { @@ -2997,42 +2974,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; u32 max_sclk = 0, max_mclk = 0; int i; - struct si_dpm_quirk *p = si_dpm_quirk_list; - /* limit all SI kickers */ - if (rdev->family == CHIP_PITCAIRN) { - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->device == 0x6810) || - (rdev->pdev->device == 0x6811) || - (rdev->pdev->device == 0x6816) || - (rdev->pdev->device == 0x6817) || - (rdev->pdev->device == 0x6806)) - max_mclk = 120000; - } else if (rdev->family == CHIP_VERDE) { - if ((rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - 
(rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6820) || - (rdev->pdev->device == 0x6821) || - (rdev->pdev->device == 0x6822) || - (rdev->pdev->device == 0x6823) || - (rdev->pdev->device == 0x682A) || - (rdev->pdev->device == 0x682B)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (rdev->family == CHIP_OLAND) { - if ((rdev->pdev->revision == 0xC7) || - (rdev->pdev->revision == 0x80) || - (rdev->pdev->revision == 0x81) || - (rdev->pdev->revision == 0x83) || - (rdev->pdev->revision == 0x87) || - (rdev->pdev->device == 0x6604) || - (rdev->pdev->device == 0x6605)) { - max_sclk = 75000; - max_mclk = 80000; - } - } else if (rdev->family == CHIP_HAINAN) { + if (rdev->family == CHIP_HAINAN) { if ((rdev->pdev->revision == 0x81) || (rdev->pdev->revision == 0x83) || (rdev->pdev->revision == 0xC3) || @@ -3040,20 +2983,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, (rdev->pdev->device == 0x6665) || (rdev->pdev->device == 0x6667)) { max_sclk = 75000; - max_mclk = 80000; - } - } - /* Apply dpm quirks */ - while (p && p->chip_device != 0) { - if (rdev->pdev->vendor == p->chip_vendor && - rdev->pdev->device == p->chip_device && - rdev->pdev->subsystem_vendor == p->subsys_vendor && - rdev->pdev->subsystem_device == p->subsys_device) { - max_sclk = p->max_sclk; - max_mclk = p->max_mclk; - break; } - ++p; } if (rps->vce_active) { diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 0665fb915579..a6d4a0236e8f 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -257,8 +257,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct drm_encoder *encoder; struct rockchip_hdmi *hdmi; - struct resource *iores; - int irq; int ret; if (!pdev->dev.of_node) @@ -273,14 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, hdmi->dev = &pdev->dev; encoder = &hdmi->encoder; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iores) - return -ENXIO; - encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); /* * If we failed to find the CRTC(s) which this encoder is @@ -301,7 +291,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + ret = dw_hdmi_bind(pdev, encoder, plat_data); /* * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), @@ -316,7 +306,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, void *data) { - return dw_hdmi_unbind(dev, master, data); + return dw_hdmi_unbind(dev); } static const struct component_ops dw_hdmi_rockchip_ops = { diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index 6d2a5cd211f3..6df53e6c1308 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -1274,13 +1274,19 @@ static bool evict_everything(struct drm_mm *mm, if (drm_mm_scan_add_block(&scan, &e->node)) break; } + + err = 0; list_for_each_entry(e, &evict_list, link) { if (!drm_mm_scan_remove_block(&scan, &e->node)) { - pr_err("Node %lld not marked for eviction!\n", - e->node.start); - 
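The dw-hdmi hunk above shrinks the glue driver's bind contract: dw_hdmi_bind() now takes the platform device and fetches the MMIO resource and IRQ itself, so the rockchip code can drop platform_get_resource()/platform_get_irq() entirely. A hedged sketch of what a glue bind reduces to under the new signature (my_glue_create_encoder() and my_glue_plat_data are placeholders, not from the patch):

static int my_glue_bind(struct device *dev, struct device *master, void *data)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm = data;
        struct drm_encoder *encoder;
        int ret;

        /* encoder setup elided; see the rockchip code above */
        encoder = my_glue_create_encoder(drm, dev);     /* hypothetical */
        if (IS_ERR(encoder))
                return PTR_ERR(encoder);

        /*
         * No platform_get_irq()/platform_get_resource() here any more:
         * the dw-hdmi core pulls both out of pdev on its own.
         */
        ret = dw_hdmi_bind(pdev, encoder, &my_glue_plat_data);
        if (ret)
                encoder->funcs->destroy(encoder);

        return ret;
}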
list_del(&e->link); + if (!err) { + pr_err("Node %lld not marked for eviction!\n", + e->node.start); + err = -EINVAL; + } } } + if (err) + return false; list_for_each_entry(e, &evict_list, link) drm_mm_remove_node(&e->node); diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index d20f7c0b4eac..c35db12435c3 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -13,7 +13,6 @@ sti-drm-y := \ sti_dvo.o \ sti_awg_utils.o \ sti_vtg.o \ - sti_vtac.o \ sti_hda.o \ sti_tvout.o \ sti_hqvdp.o \ diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index ff71e25ab5bf..acc056644cd0 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -89,38 +89,9 @@ static struct drm_info_list sti_drm_dbg_list[] = { {"fps_get", sti_drm_fps_dbg_show, 0}, }; -static int sti_drm_debugfs_create(struct dentry *root, - struct drm_minor *minor, - const char *name, - const struct file_operations *fops) -{ - struct drm_device *dev = minor->dev; - struct drm_info_node *node; - struct dentry *ent; - - ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops); - if (IS_ERR(ent)) - return PTR_ERR(ent); - - node = kmalloc(sizeof(*node), GFP_KERNEL); - if (!node) { - debugfs_remove(ent); - return -ENOMEM; - } - - node->minor = minor; - node->dent = ent; - node->info_ent = (void *)fops; - - mutex_lock(&minor->debugfs_lock); - list_add(&node->list, &minor->debugfs_list); - mutex_unlock(&minor->debugfs_lock); - - return 0; -} - static int sti_drm_dbg_init(struct drm_minor *minor) { + struct dentry *dentry; int ret; ret = drm_debugfs_create_files(sti_drm_dbg_list, @@ -129,10 +100,13 @@ static int sti_drm_dbg_init(struct drm_minor *minor) if (ret) goto err; - ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show", + dentry = debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, + minor->debugfs_root, minor->dev, &sti_drm_fps_fops); - if (ret) + if (!dentry) { + ret = -ENOMEM; goto err; + } DRM_INFO("%s: debugfs installed\n", DRIVER_NAME); return 0; @@ -141,15 +115,6 @@ err: return ret; } -static void sti_drm_dbg_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(sti_drm_dbg_list, - ARRAY_SIZE(sti_drm_dbg_list), minor); - - drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops, - 1, minor); -} - static void sti_atomic_schedule(struct sti_private *private, struct drm_atomic_state *state) { @@ -252,19 +217,7 @@ static void sti_output_poll_changed(struct drm_device *ddev) { struct sti_private *private = ddev->dev_private; - if (!ddev->mode_config.num_connector) - return; - - if (private->fbdev) { - drm_fbdev_cma_hotplug_event(private->fbdev); - return; - } - - private->fbdev = drm_fbdev_cma_init(ddev, 32, - ddev->mode_config.num_crtc, - ddev->mode_config.num_connector); - if (IS_ERR(private->fbdev)) - private->fbdev = NULL; + drm_fbdev_cma_hotplug_event(private->fbdev); } static const struct drm_mode_config_funcs sti_mode_config_funcs = { @@ -326,7 +279,6 @@ static struct drm_driver sti_driver = { .gem_prime_mmap = drm_gem_cma_prime_mmap, .debugfs_init = sti_drm_dbg_init, - .debugfs_cleanup = sti_drm_dbg_cleanup, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -382,6 +334,8 @@ static void sti_cleanup(struct drm_device *ddev) static int sti_bind(struct device *dev) { struct drm_device *ddev; + struct sti_private *private; + struct drm_fbdev_cma *fbdev; int ret; ddev = drm_dev_alloc(&sti_driver, dev); @@ -404,6 +358,17 @@ static int sti_bind(struct device *dev) drm_mode_config_reset(ddev); + private = 
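The sti_drv.c hunk above drops the hand-rolled drm_info_node bookkeeping in favour of one direct debugfs_create_file() call. The resulting idiom, as a standalone sketch (hypothetical wrapper; at this kernel vintage the function returns NULL on failure, which is why the hunk tests !dentry):

static int sti_create_fps_debugfs(struct drm_minor *minor,
                                  const struct file_operations *fops)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("fps_show", S_IRUGO | S_IWUSR,
                                     minor->debugfs_root, minor->dev, fops);
        return dentry ? 0 : -ENOMEM;    /* NULL means creation failed */
}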
ddev->dev_private; + if (ddev->mode_config.num_connector) { + fbdev = drm_fbdev_cma_init(ddev, 32, ddev->mode_config.num_crtc, + ddev->mode_config.num_connector); + if (IS_ERR(fbdev)) { + DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n"); + fbdev = NULL; + } + private->fbdev = fbdev; + } + return 0; err_register: @@ -476,7 +441,6 @@ static struct platform_driver sti_platform_driver = { static struct platform_driver * const drivers[] = { &sti_tvout_driver, - &sti_vtac_driver, &sti_hqvdp_driver, &sti_hdmi_driver, &sti_hda_driver, diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h index 78ebe5e30f53..4c75845cc9ab 100644 --- a/drivers/gpu/drm/sti/sti_drv.h +++ b/drivers/gpu/drm/sti/sti_drv.h @@ -34,7 +34,6 @@ struct sti_private { }; extern struct platform_driver sti_tvout_driver; -extern struct platform_driver sti_vtac_driver; extern struct platform_driver sti_hqvdp_driver; extern struct platform_driver sti_hdmi_driver; extern struct platform_driver sti_hda_driver; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 411dc6ec976e..bb23318a44b7 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -195,13 +195,6 @@ static struct drm_info_list dvo_debugfs_files[] = { { "dvo", dvo_dbg_show, 0, NULL }, }; -static void dvo_debugfs_exit(struct sti_dvo *dvo, struct drm_minor *minor) -{ - drm_debugfs_remove_files(dvo_debugfs_files, - ARRAY_SIZE(dvo_debugfs_files), - minor); -} - static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor) { unsigned int i; @@ -514,9 +507,6 @@ static void sti_dvo_unbind(struct device *dev, struct device *master, void *data) { struct sti_dvo *dvo = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; - - dvo_debugfs_exit(dvo, drm_dev->primary); drm_bridge_remove(dvo->bridge); } diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 66d37d78152a..0c0a75bc8bc3 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -365,13 +365,6 @@ static struct drm_info_list hda_debugfs_files[] = { { "hda", hda_dbg_show, 0, NULL }, }; -static void hda_debugfs_exit(struct sti_hda *hda, struct drm_minor *minor) -{ - drm_debugfs_remove_files(hda_debugfs_files, - ARRAY_SIZE(hda_debugfs_files), - minor); -} - static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor) { unsigned int i; @@ -739,10 +732,6 @@ err_sysfs: static void sti_hda_unbind(struct device *dev, struct device *master, void *data) { - struct sti_hda *hda = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; - - hda_debugfs_exit(hda, drm_dev->primary); } static const struct component_ops sti_hda_ops = { diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index f0af1ae82ee9..c9151849d604 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -731,13 +731,6 @@ static struct drm_info_list hdmi_debugfs_files[] = { { "hdmi", hdmi_dbg_show, 0, NULL }, }; -static void hdmi_debugfs_exit(struct sti_hdmi *hdmi, struct drm_minor *minor) -{ - drm_debugfs_remove_files(hdmi_debugfs_files, - ARRAY_SIZE(hdmi_debugfs_files), - minor); -} - static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) { unsigned int i; @@ -788,6 +781,95 @@ static void sti_hdmi_disable(struct drm_bridge *bridge) hdmi->enabled = false; } +/** + * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent + * clocks. 
Non-coherent clocks mean that the audio and TMDS clocks do not have the + * same source (they drift relative to each other). In this case the assumption is that CTS is + * automatically calculated by hardware. + * + * @audio_fs: audio frame clock frequency in Hz + * + * The values computed are based on the table described in the HDMI 1.4b specification. + * + * Returns the N value. + */ +static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs) +{ + unsigned int n; + + switch (audio_fs) { + case 32000: + n = 4096; + break; + case 44100: + n = 6272; + break; + case 48000: + n = 6144; + break; + case 88200: + n = 6272 * 2; + break; + case 96000: + n = 6144 * 2; + break; + case 176400: + n = 6272 * 4; + break; + case 192000: + n = 6144 * 4; + break; + default: + /* Not pre-defined, recommended value: 128 * fs / 1000 */ + n = (audio_fs * 128) / 1000; + } + + return n; +} + +static int hdmi_audio_configure(struct sti_hdmi *hdmi) +{ + int audio_cfg, n; + struct hdmi_audio_params *params = &hdmi->audio; + struct hdmi_audio_infoframe *info = &params->cea; + + DRM_DEBUG_DRIVER("\n"); + + if (!hdmi->enabled) + return 0; + + /* update N parameter */ + n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate); + + DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n", + params->sample_rate, hdmi->mode.clock * 1000, n); + hdmi_write(hdmi, n, HDMI_AUDN); + + /* update HDMI registers according to configuration */ + audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | + HDMI_AUD_CFG_ONE_BIT_INVALID; + + switch (info->channels) { + case 8: + audio_cfg |= HDMI_AUD_CFG_CH78_VALID; + case 6: + audio_cfg |= HDMI_AUD_CFG_CH56_VALID; + case 4: + audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; + case 2: + audio_cfg |= HDMI_AUD_CFG_CH12_VALID; + break; + default: + DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n", + info->channels); + return -EINVAL; + } + + hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); + + return hdmi_audio_infoframe_config(hdmi); +} + static void sti_hdmi_pre_enable(struct drm_bridge *bridge) { struct sti_hdmi *hdmi = bridge->driver_private; @@ -826,9 +908,12 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge) if (hdmi_avi_infoframe_config(hdmi)) DRM_ERROR("Unable to configure AVI infoframe\n"); - /* Program AUDIO infoframe */ - if (hdmi_audio_infoframe_config(hdmi)) - DRM_ERROR("Unable to configure AUDIO infoframe\n"); + if (hdmi->audio.enabled) { + if (hdmi_audio_configure(hdmi)) + DRM_ERROR("Unable to configure audio\n"); + } else { + hdmi_audio_infoframe_config(hdmi); + } /* Program VS infoframe */ if (hdmi_vendor_infoframe_config(hdmi)) @@ -1078,97 +1163,6 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) return NULL; } -/** - * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent - * clocks. Non-coherent clocks mean that the audio and TMDS clocks do not have the - * same source (they drift relative to each other). In this case the assumption is that CTS is - * automatically calculated by hardware. - * - * @audio_fs: audio frame clock frequency in Hz - * - * The values computed are based on the table described in the HDMI 1.4b specification. - * - * Returns the N value. 
- */ -static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs) -{ - unsigned int n; - - switch (audio_fs) { - case 32000: - n = 4096; - break; - case 44100: - n = 6272; - break; - case 48000: - n = 6144; - break; - case 88200: - n = 6272 * 2; - break; - case 96000: - n = 6144 * 2; - break; - case 176400: - n = 6272 * 4; - break; - case 192000: - n = 6144 * 4; - break; - default: - /* Not pre-defined, recommended value: 128 * fs / 1000 */ - n = (audio_fs * 128) / 1000; - } - - return n; -} - -static int hdmi_audio_configure(struct sti_hdmi *hdmi, - struct hdmi_audio_params *params) -{ - int audio_cfg, n; - struct hdmi_audio_infoframe *info = &params->cea; - - DRM_DEBUG_DRIVER("\n"); - - if (!hdmi->enabled) - return 0; - - /* update N parameter */ - n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate); - - DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n", - params->sample_rate, hdmi->mode.clock * 1000, n); - hdmi_write(hdmi, n, HDMI_AUDN); - - /* update HDMI registers according to configuration */ - audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | - HDMI_AUD_CFG_ONE_BIT_INVALID; - - switch (info->channels) { - case 8: - audio_cfg |= HDMI_AUD_CFG_CH78_VALID; - case 6: - audio_cfg |= HDMI_AUD_CFG_CH56_VALID; - case 4: - audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; - case 2: - audio_cfg |= HDMI_AUD_CFG_CH12_VALID; - break; - default: - DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n", - info->channels); - return -EINVAL; - } - - hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); - - hdmi->audio = *params; - - return hdmi_audio_infoframe_config(hdmi); -} - static void hdmi_audio_shutdown(struct device *dev, void *data) { struct sti_hdmi *hdmi = dev_get_drvdata(dev); @@ -1192,17 +1186,9 @@ static int hdmi_audio_hw_params(struct device *dev, { struct sti_hdmi *hdmi = dev_get_drvdata(dev); int ret; - struct hdmi_audio_params audio = { - .sample_width = params->sample_width, - .sample_rate = params->sample_rate, - .cea = params->cea, - }; DRM_DEBUG_DRIVER("\n"); - if (!hdmi->enabled) - return 0; - if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv || daifmt->frame_clk_inv || daifmt->bit_clk_master || daifmt->frame_clk_master) { @@ -1213,9 +1199,13 @@ static int hdmi_audio_hw_params(struct device *dev, return -EINVAL; } - audio.enabled = true; + hdmi->audio.sample_width = params->sample_width; + hdmi->audio.sample_rate = params->sample_rate; + hdmi->audio.cea = params->cea; + + hdmi->audio.enabled = true; - ret = hdmi_audio_configure(hdmi, &audio); + ret = hdmi_audio_configure(hdmi); if (ret < 0) return ret; @@ -1359,10 +1349,6 @@ err_sysfs: static void sti_hdmi_unbind(struct device *dev, struct device *master, void *data) { - struct sti_hdmi *hdmi = dev_get_drvdata(dev); - struct drm_device *drm_dev = data; - - hdmi_debugfs_exit(hdmi, drm_dev->primary); } static const struct component_ops sti_hdmi_ops = { diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index becf10d255c4..4376fd8a8e52 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -332,6 +332,7 @@ struct sti_hqvdp_cmd { * @hqvdp_cmd_paddr: physical address of hqvdp_cmd * @vtg: vtg for main data path * @xp70_initialized: true if xp70 is already initialized + * @vtg_registered: true if registered to VTG */ struct sti_hqvdp { struct device *dev; @@ -347,6 +348,7 @@ struct sti_hqvdp { u32 hqvdp_cmd_paddr; struct sti_vtg *vtg; bool xp70_initialized; + bool vtg_registered; }; #define to_sti_hqvdp(x) container_of(x, struct 
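A worked check on the N table above, runnable as plain C: HDMI 1.4b chooses N so that the Audio Clock Regeneration packet rate 128 * fs / N stays near 1 kHz (exactly 1 kHz for the 48 kHz family, 900 Hz for the 44.1 kHz family), which is why only non-tabulated rates fall back to 128 * fs / 1000:

#include <stdio.h>

int main(void)
{
        const unsigned int fs[] = { 32000, 44100, 48000, 88200,
                                    96000, 176400, 192000 };
        const unsigned int n[]  = { 4096, 6272, 6144, 6272 * 2,
                                    6144 * 2, 6272 * 4, 6144 * 4 };

        for (int i = 0; i < 7; i++)
                printf("fs=%6u  N=%5u  ACR rate=%u Hz  fallback=%u\n",
                       fs[i], n[i], 128 * fs[i] / n[i], 128 * fs[i] / 1000);
        return 0;
}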
sti_hqvdp, plane) @@ -771,7 +773,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp) DRM_ERROR("XP70 could not revert to idle\n"); hqvdp->plane.status = STI_PLANE_DISABLED; - hqvdp->xp70_initialized = false; + hqvdp->vtg_registered = false; } /** @@ -1064,10 +1066,11 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, return -EINVAL; } - if (!hqvdp->xp70_initialized) { + if (!hqvdp->xp70_initialized) /* Start HQVDP XP70 coprocessor */ sti_hqvdp_start_xp70(hqvdp); + if (!hqvdp->vtg_registered) { /* Prevent VTG shutdown */ if (clk_prepare_enable(hqvdp->clk_pix_main)) { DRM_ERROR("Failed to prepare/enable pix main clk\n"); @@ -1081,6 +1084,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, DRM_ERROR("Cannot register VTG notifier\n"); return -EINVAL; } + hqvdp->vtg_registered = true; } DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", @@ -1113,6 +1117,21 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, if (!crtc || !fb) return; + if ((oldstate->fb == state->fb) && + (oldstate->crtc_x == state->crtc_x) && + (oldstate->crtc_y == state->crtc_y) && + (oldstate->crtc_w == state->crtc_w) && + (oldstate->crtc_h == state->crtc_h) && + (oldstate->src_x == state->src_x) && + (oldstate->src_y == state->src_y) && + (oldstate->src_w == state->src_w) && + (oldstate->src_h == state->src_h)) { + /* No change since last update, do not post cmd */ + DRM_DEBUG_DRIVER("No change, not posting cmd\n"); + plane->status = STI_PLANE_UPDATED; + return; + } + mode = &crtc->mode; dst_x = state->crtc_x; dst_y = state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c index ca4b3719a64a..427d8f58c6b1 100644 --- a/drivers/gpu/drm/sti/sti_plane.c +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -65,9 +65,18 @@ void sti_plane_update_fps(struct sti_plane *plane, fps->last_timestamp = now; fps->last_frame_counter = fps->curr_frame_counter; - fpks = (num_frames * 1000000) / ms_since_last; - snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps", - sti_plane_to_str(plane), fpks / 1000, fpks % 1000); + + if (plane->drm_plane.fb) { + fpks = (num_frames * 1000000) / ms_since_last; + snprintf(plane->fps_info.fps_str, FPS_LENGTH, + "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)", + plane->drm_plane.name, + plane->drm_plane.fb->width, + plane->drm_plane.fb->height, + (char *)&plane->drm_plane.fb->format->format, + fpks / 1000, fpks % 1000, + sti_plane_to_str(plane)); + } if (fps->curr_field_counter) { /* Compute number of field updates */ @@ -75,7 +84,7 @@ void sti_plane_update_fps(struct sti_plane *plane, fps->last_field_counter = fps->curr_field_counter; fipks = (num_fields * 1000000) / ms_since_last; snprintf(plane->fps_info.fips_str, - FPS_LENGTH, " - %d.%.3d field/sec", + FPS_LENGTH, " - %3d.%-3.3d field/sec", fipks / 1000, fipks % 1000); } else { plane->fps_info.fips_str[0] = '\0'; diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h index ce3e8d6c88bb..c36c13faaa18 100644 --- a/drivers/gpu/drm/sti/sti_plane.h +++ b/drivers/gpu/drm/sti/sti_plane.h @@ -48,7 +48,7 @@ enum sti_plane_status { STI_PLANE_DISABLED, }; -#define FPS_LENGTH 64 +#define FPS_LENGTH 128 struct sti_fps_info { bool output; unsigned int curr_frame_counter; diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index ad46d3558d91..8b8ea717c121 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -567,13 +567,6 @@ static struct drm_info_list tvout_debugfs_files[] = { { "tvout", 
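The nine-field comparison in the hqvdp atomic_update hunk above is a standard "nothing changed, skip the command" guard. The same test reads more compactly as a predicate; a kernel-context sketch with a hypothetical helper name, field names matching the hunk:

static bool sti_plane_geom_unchanged(const struct drm_plane_state *oldstate,
                                     const struct drm_plane_state *state)
{
        return oldstate->fb == state->fb &&
               oldstate->crtc_x == state->crtc_x &&
               oldstate->crtc_y == state->crtc_y &&
               oldstate->crtc_w == state->crtc_w &&
               oldstate->crtc_h == state->crtc_h &&
               oldstate->src_x == state->src_x &&
               oldstate->src_y == state->src_y &&
               oldstate->src_w == state->src_w &&
               oldstate->src_h == state->src_h;
}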
tvout_dbg_show, 0, NULL }, }; -static void tvout_debugfs_exit(struct sti_tvout *tvout, struct drm_minor *minor) -{ - drm_debugfs_remove_files(tvout_debugfs_files, - ARRAY_SIZE(tvout_debugfs_files), - minor); -} - static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor) { unsigned int i; @@ -627,7 +620,6 @@ static void sti_tvout_early_unregister(struct drm_encoder *encoder) if (!tvout->debugfs_registered) return; - tvout_debugfs_exit(tvout, encoder->dev->primary); tvout->debugfs_registered = false; } diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c deleted file mode 100644 index cf7fe8a1db42..000000000000 --- a/drivers/gpu/drm/sti/sti_vtac.c +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (C) STMicroelectronics SA 2014 - * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. - * License terms: GNU General Public License (GPL), version 2 - */ - -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/platform_device.h> - -#include <drm/drmP.h> - -#include "sti_drv.h" - -/* registers offset */ -#define VTAC_CONFIG 0x00 -#define VTAC_RX_FIFO_CONFIG 0x04 -#define VTAC_FIFO_CONFIG_VAL 0x04 - -#define VTAC_SYS_CFG8521 0x824 -#define VTAC_SYS_CFG8522 0x828 - -/* Number of phyts per pixel */ -#define VTAC_2_5_PPP 0x0005 -#define VTAC_3_PPP 0x0006 -#define VTAC_4_PPP 0x0008 -#define VTAC_5_PPP 0x000A -#define VTAC_6_PPP 0x000C -#define VTAC_13_PPP 0x001A -#define VTAC_14_PPP 0x001C -#define VTAC_15_PPP 0x001E -#define VTAC_16_PPP 0x0020 -#define VTAC_17_PPP 0x0022 -#define VTAC_18_PPP 0x0024 - -/* enable bits */ -#define VTAC_ENABLE 0x3003 - -#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0) -#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1) -#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3) -#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4) -#define VTAC_TX_PHY_PROG_N3 BIT(9) - - -/** - * VTAC mode structure - * - * @vid_in_width: Video Data Resolution - * @phyts_width: Width of phyt buses(phyt low and phyt high). 
- * @phyts_per_pixel: Number of phyts sent per pixel - */ -struct sti_vtac_mode { - u32 vid_in_width; - u32 phyts_width; - u32 phyts_per_pixel; -}; - -static const struct sti_vtac_mode vtac_mode_main = { - .vid_in_width = 0x2, - .phyts_width = 0x2, - .phyts_per_pixel = VTAC_5_PPP, -}; -static const struct sti_vtac_mode vtac_mode_aux = { - .vid_in_width = 0x1, - .phyts_width = 0x0, - .phyts_per_pixel = VTAC_17_PPP, -}; - -/** - * VTAC structure - * - * @dev: pointer to device structure - * @regs: ioremapped registers for RX and TX devices - * @phy_regs: phy registers for TX device - * @clk: clock - * @mode: main or auxillary configuration mode - */ -struct sti_vtac { - struct device *dev; - void __iomem *regs; - void __iomem *phy_regs; - struct clk *clk; - const struct sti_vtac_mode *mode; -}; - -static void sti_vtac_rx_set_config(struct sti_vtac *vtac) -{ - u32 config; - - /* Enable VTAC clock */ - if (clk_prepare_enable(vtac->clk)) - DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n"); - - writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG); - - config = VTAC_ENABLE; - config |= vtac->mode->vid_in_width << 4; - config |= vtac->mode->phyts_width << 16; - config |= vtac->mode->phyts_per_pixel << 23; - writel(config, vtac->regs + VTAC_CONFIG); -} - -static void sti_vtac_tx_set_config(struct sti_vtac *vtac) -{ - u32 phy_config; - u32 config; - - /* Enable VTAC clock */ - if (clk_prepare_enable(vtac->clk)) - DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n"); - - /* Configure vtac phy */ - phy_config = 0x00000000; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522); - phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config |= VTAC_TX_PHY_PROG_N3; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521); - phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE; - writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521); - - /* Configure vtac tx */ - config = VTAC_ENABLE; - config |= vtac->mode->vid_in_width << 4; - config |= vtac->mode->phyts_width << 16; - config |= vtac->mode->phyts_per_pixel << 23; - writel(config, vtac->regs + VTAC_CONFIG); -} - -static const struct of_device_id vtac_of_match[] = { - { - .compatible = "st,vtac-main", - .data = &vtac_mode_main, - }, { - .compatible = "st,vtac-aux", - .data = &vtac_mode_aux, - }, { - /* end node */ - } -}; -MODULE_DEVICE_TABLE(of, vtac_of_match); - -static int sti_vtac_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - const struct of_device_id *id; - struct sti_vtac *vtac; - struct resource *res; - - vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL); - if (!vtac) - return -ENOMEM; - - vtac->dev = dev; - - id = of_match_node(vtac_of_match, np); - if (!id) - return -ENOMEM; - - vtac->mode = id->data; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("Invalid resource\n"); - return -ENOMEM; - } - vtac->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(vtac->regs)) - return PTR_ERR(vtac->regs); - - - vtac->clk = devm_clk_get(dev, "vtac"); - if 
(IS_ERR(vtac->clk)) { - DRM_ERROR("Cannot get vtac clock\n"); - return PTR_ERR(vtac->clk); - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - vtac->phy_regs = devm_ioremap_nocache(dev, res->start, - resource_size(res)); - sti_vtac_tx_set_config(vtac); - } else { - - sti_vtac_rx_set_config(vtac); - } - - platform_set_drvdata(pdev, vtac); - DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev)); - - return 0; -} - -static int sti_vtac_remove(struct platform_device *pdev) -{ - return 0; -} - -struct platform_driver sti_vtac_driver = { - .driver = { - .name = "sti-vtac", - .owner = THIS_MODULE, - .of_match_table = vtac_of_match, - }, - .probe = sti_vtac_probe, - .remove = sti_vtac_remove, -}; - -MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); -MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index a8882bdd0f8b..c3d9c8ae14af 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -429,6 +429,10 @@ static int vtg_probe(struct platform_device *pdev) return -ENOMEM; } vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); + if (!vtg->regs) { + DRM_ERROR("failed to remap I/O memory\n"); + return -ENOMEM; + } np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); if (np) { diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index a278e1f44661..08ce15070f80 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -193,7 +193,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend, &val); if (ret) { DRM_DEBUG_DRIVER("Invalid format\n"); - return val; + return ret; } regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer), diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 0f4eacb0af4f..ef215fef63d6 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -804,23 +804,10 @@ static const struct file_operations tegra_drm_fops = { .llseek = noop_llseek, }; -static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, - unsigned int pipe) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) { - if (pipe == drm_crtc_index(crtc)) - return crtc; - } - - return NULL; -} - static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, unsigned int pipe) { - struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe); struct tegra_dc *dc = to_tegra_dc(crtc); if (!crtc) @@ -831,7 +818,7 @@ static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe) { - struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe); struct tegra_dc *dc = to_tegra_dc(crtc); if (!crtc) @@ -844,7 +831,7 @@ static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe) static void tegra_drm_disable_vblank(struct drm_device *drm, unsigned int pipe) { - struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe); + struct drm_crtc *crtc = drm_crtc_from_index(drm, pipe); struct tegra_dc *dc = to_tegra_dc(crtc); if (crtc) @@ -907,12 +894,6 @@ static int tegra_debugfs_init(struct drm_minor *minor) ARRAY_SIZE(tegra_debugfs_list), minor->debugfs_root, minor); } - -static void tegra_debugfs_cleanup(struct drm_minor *minor) -{ - 
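Two small correctness idioms sit in the hunks above: devm_ioremap_nocache() signals failure with NULL rather than an ERR_PTR, so the sti_vtg fix checks !ptr, and the sun4i fix returns the error code (ret) instead of an unrelated output value (val). A kernel-context sketch of the ioremap idiom (hypothetical wrapper):

static void __iomem *vtg_map_regs(struct device *dev, struct resource *res)
{
        void __iomem *regs;

        regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
        if (!regs) {                    /* NULL on failure, never ERR_PTR */
                dev_err(dev, "failed to remap I/O memory\n");
                return NULL;
        }
        return regs;
}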
drm_debugfs_remove_files(tegra_debugfs_list, - ARRAY_SIZE(tegra_debugfs_list), minor); -} #endif static struct drm_driver tegra_drm_driver = { @@ -930,7 +911,6 @@ static struct drm_driver tegra_drm_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = tegra_debugfs_init, - .debugfs_cleanup = tegra_debugfs_cleanup, #endif .gem_free_object_unlocked = tegra_bo_free_object, diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 8df7783cecc2..f896e2ff7d47 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -271,8 +271,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, return 0; destroy: - drm_framebuffer_unregister_private(fb); - tegra_fb_destroy(fb); + drm_framebuffer_remove(fb); release: drm_fb_helper_release_fbi(helper); return err; @@ -342,10 +341,8 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) drm_fb_helper_unregister_fbi(&fbdev->base); drm_fb_helper_release_fbi(&fbdev->base); - if (fbdev->fb) { - drm_framebuffer_unregister_private(&fbdev->fb->base); + if (fbdev->fb) drm_framebuffer_remove(&fbdev->fb->base); - } drm_fb_helper_fini(&fbdev->base); tegra_fbdev_free(fbdev); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 3a763f7cb743..f80bf9385e41 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; struct tilcdc_drm_private *priv = dev->dev_private; - uint32_t stat; + uint32_t stat, reg; stat = tilcdc_read_irqstatus(dev); tilcdc_clear_irqstatus(dev, stat); @@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", __func__, stat); tilcdc_crtc->frame_intact = false; - if (tilcdc_crtc->sync_lost_count++ > - SYNC_LOST_COUNT_LIMIT) { - dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat); - queue_work(system_wq, &tilcdc_crtc->recover_work); - if (priv->rev == 1) + if (priv->rev == 1) { + reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG); + if (reg & LCDC_RASTER_ENABLE) { tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, - LCDC_V1_SYNC_LOST_INT_ENA); - else + LCDC_RASTER_ENABLE); + tilcdc_set(dev, LCDC_RASTER_CTRL_REG, + LCDC_RASTER_ENABLE); + } + } else { + if (tilcdc_crtc->sync_lost_count++ > + SYNC_LOST_COUNT_LIMIT) { + dev_err(dev->dev, + "%s(0x%08x): Sync lost flood detected, recovering", + __func__, stat); + queue_work(system_wq, + &tilcdc_crtc->recover_work); tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_SYNC_LOST); - tilcdc_crtc->sync_lost_count = 0; + tilcdc_crtc->sync_lost_count = 0; + } } } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index ec15585c7a27..919294a735fe 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -539,17 +539,6 @@ static int tilcdc_debugfs_init(struct drm_minor *minor) return ret; } - -static void tilcdc_debugfs_cleanup(struct drm_minor *minor) -{ - struct tilcdc_module *mod; - drm_debugfs_remove_files(tilcdc_debugfs_list, - ARRAY_SIZE(tilcdc_debugfs_list), minor); - - list_for_each_entry(mod, &module_list, list) - if (mod->funcs->debugfs_cleanup) - mod->funcs->debugfs_cleanup(mod, minor); -} #endif static const struct file_operations fops = { @@ -589,7 +578,6 @@ static struct drm_driver tilcdc_driver = { .gem_prime_mmap = drm_gem_cma_prime_mmap, #ifdef CONFIG_DEBUG_FS 
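The tilcdc interrupt rework above splits sync-lost handling by IP revision: rev 1 recovers in place by pulsing the raster enable bit, while rev 2 keeps the flood counter and defers to the recovery worker. The rev-1 path extracted into a standalone sketch (tilcdc_read/tilcdc_clear/tilcdc_set are the driver's real accessors; the helper itself is hypothetical):

static void tilcdc_v1_resync(struct drm_device *dev)
{
        u32 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);

        /* Only bounce the raster if it is currently running. */
        if (reg & LCDC_RASTER_ENABLE) {
                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
                tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
        }
}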
.debugfs_init = tilcdc_debugfs_init, - .debugfs_cleanup = tilcdc_debugfs_cleanup, #endif .fops = &fops, .name = "tilcdc", diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index 0e71daf5b5cb..8caa11bc7aec 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -111,8 +111,6 @@ struct tilcdc_module_ops { #ifdef CONFIG_DEBUG_FS /* create debugfs nodes (can be NULL): */ int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor); - /* cleanup debugfs nodes (can be NULL): */ - void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor); #endif }; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d5063618efa7..4562e53c8244 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -163,6 +163,7 @@ static void ttm_bo_release_list(struct kref *list_kref) void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man; lockdep_assert_held(&bo->resv->lock.base); @@ -170,11 +171,13 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) BUG_ON(!list_empty(&bo->lru)); - list_add(&bo->lru, bdev->driver->lru_tail(bo)); + man = &bdev->man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru[bo->priority]); kref_get(&bo->list_kref); if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { - list_add(&bo->swap, bdev->driver->swap_lru_tail(bo)); + list_add_tail(&bo->swap, + &bo->glob->swap_lru[bo->priority]); kref_get(&bo->list_kref); } } @@ -183,12 +186,8 @@ EXPORT_SYMBOL(ttm_bo_add_to_lru); int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; int put_count = 0; - if (bdev->driver->lru_removal) - bdev->driver->lru_removal(bo); - if (!list_empty(&bo->swap)) { list_del_init(&bo->swap); ++put_count; @@ -198,6 +197,11 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) ++put_count; } + /* + * TODO: Add a driver hook to delete from + * driver-specific LRU's here. + */ + return put_count; } @@ -226,32 +230,16 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) { - struct ttm_bo_device *bdev = bo->bdev; int put_count = 0; lockdep_assert_held(&bo->resv->lock.base); - if (bdev->driver->lru_removal) - bdev->driver->lru_removal(bo); - put_count = ttm_bo_del_from_lru(bo); ttm_bo_list_ref_sub(bo, put_count, true); ttm_bo_add_to_lru(bo); } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); -struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo) -{ - return bo->bdev->man[bo->mem.mem_type].lru.prev; -} -EXPORT_SYMBOL(ttm_bo_default_lru_tail); - -struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo) -{ - return bo->glob->swap_lru.prev; -} -EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail); - /* * Call bo->mutex locked. 
*/ @@ -342,7 +330,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (bo->mem.mem_type == TTM_PL_SYSTEM) { if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, mem); + bdev->driver->move_notify(bo, evict, mem); bo->mem = *mem; mem->mm_node = NULL; goto moved; @@ -350,7 +338,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, mem); + bdev->driver->move_notify(bo, evict, mem); if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) @@ -366,7 +354,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg tmp_mem = *mem; *mem = bo->mem; bo->mem = tmp_mem; - bdev->driver->move_notify(bo, mem); + bdev->driver->move_notify(bo, false, mem); bo->mem = *mem; *mem = tmp_mem; } @@ -414,7 +402,7 @@ out_err: static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { if (bo->bdev->driver->move_notify) - bo->bdev->driver->move_notify(bo, NULL); + bo->bdev->driver->move_notify(bo, false, NULL); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; @@ -741,20 +729,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_buffer_object *bo; int ret = -EBUSY, put_count; + unsigned i; spin_lock(&glob->lru_lock); - list_for_each_entry(bo, &man->lru, lru) { - ret = __ttm_bo_reserve(bo, false, true, NULL); - if (ret) - continue; + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + list_for_each_entry(bo, &man->lru[i], lru) { + ret = __ttm_bo_reserve(bo, false, true, NULL); + if (ret) + continue; - if (place && !bdev->driver->eviction_valuable(bo, place)) { - __ttm_bo_unreserve(bo); - ret = -EBUSY; - continue; + if (place && !bdev->driver->eviction_valuable(bo, + place)) { + __ttm_bo_unreserve(bo); + ret = -EBUSY; + continue; + } + + break; } - break; + if (!ret) + break; } if (ret) { @@ -1197,6 +1192,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); + bo->priority = 0; /* * For ttm_bo_type_device buffers, allocate @@ -1291,29 +1287,27 @@ int ttm_bo_create(struct ttm_bo_device *bdev, EXPORT_SYMBOL(ttm_bo_create); static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, - unsigned mem_type, bool allow_errors) + unsigned mem_type) { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; struct dma_fence *fence; int ret; + unsigned i; /* * Can't use standard list traversal since we're unlocking. 
*/ spin_lock(&glob->lru_lock); - while (!list_empty(&man->lru)) { - spin_unlock(&glob->lru_lock); - ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); - if (ret) { - if (allow_errors) { + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + while (!list_empty(&man->lru[i])) { + spin_unlock(&glob->lru_lock); + ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); + if (ret) return ret; - } else { - pr_err("Cleanup eviction failed\n"); - } + spin_lock(&glob->lru_lock); } - spin_lock(&glob->lru_lock); } spin_unlock(&glob->lru_lock); @@ -1324,13 +1318,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, if (fence) { ret = dma_fence_wait(fence, false); dma_fence_put(fence); - if (ret) { - if (allow_errors) { - return ret; - } else { - pr_err("Cleanup eviction failed\n"); - } - } + if (ret) + return ret; } return 0; @@ -1359,7 +1348,11 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = 0; if (mem_type > 0) { - ttm_bo_force_list_clean(bdev, mem_type, false); + ret = ttm_bo_force_list_clean(bdev, mem_type); + if (ret) { + pr_err("Cleanup eviction failed\n"); + return ret; + } ret = (*man->func->takedown)(man); } @@ -1382,7 +1375,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) return 0; } - return ttm_bo_force_list_clean(bdev, mem_type, true); + return ttm_bo_force_list_clean(bdev, mem_type); } EXPORT_SYMBOL(ttm_bo_evict_mm); @@ -1391,6 +1384,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, { int ret = -EINVAL; struct ttm_mem_type_manager *man; + unsigned i; BUG_ON(type >= TTM_NUM_MEM_TYPES); man = &bdev->man[type]; @@ -1416,7 +1410,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, man->use_type = true; man->size = p_size; - INIT_LIST_HEAD(&man->lru); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + INIT_LIST_HEAD(&man->lru[i]); man->move = NULL; return 0; @@ -1448,6 +1443,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref) container_of(ref, struct ttm_bo_global_ref, ref); struct ttm_bo_global *glob = ref->object; int ret; + unsigned i; mutex_init(&glob->device_list_mutex); spin_lock_init(&glob->lru_lock); @@ -1459,7 +1455,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref) goto out_no_drp; } - INIT_LIST_HEAD(&glob->swap_lru); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + INIT_LIST_HEAD(&glob->swap_lru[i]); INIT_LIST_HEAD(&glob->device_list); ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); @@ -1518,8 +1515,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) if (list_empty(&bdev->ddestroy)) TTM_DEBUG("Delayed destroy list was clean\n"); - if (list_empty(&bdev->man[0].lru)) - TTM_DEBUG("Swap list was clean\n"); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) + if (list_empty(&bdev->man[0].lru[0])) + TTM_DEBUG("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); drm_vma_offset_manager_destroy(&bdev->vma_manager); @@ -1670,11 +1668,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) struct ttm_buffer_object *bo; int ret = -EBUSY; int put_count; - uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); + unsigned i; spin_lock(&glob->lru_lock); - list_for_each_entry(bo, &glob->swap_lru, swap) { - ret = __ttm_bo_reserve(bo, false, true, NULL); + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + list_for_each_entry(bo, &glob->swap_lru[i], swap) { + ret = __ttm_bo_reserve(bo, false, true, NULL); + if (!ret) + break; + } if (!ret) break; } @@ -1701,7 +1703,8 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) * Move to system cached */ - 
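The TTM rework above is the heart of this pull: the single per-manager LRU (plus the driver lru_tail/swap_lru_tail hooks) becomes a fixed array of TTM_MAX_BO_PRIORITY lists indexed by bo->priority. BOs are queued with list_add_tail(&bo->lru, &man->lru[bo->priority]), and every consumer scans the indices in ascending order, so entries on lower-numbered lists are reached first. A sketch of the scan shape; try_reserve() stands in for __ttm_bo_reserve():

static bool try_reserve(struct ttm_buffer_object *bo); /* stand-in */

static struct ttm_buffer_object *
first_reservable_bo(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;
        unsigned i;

        /* ascending priority index, FIFO order within each list */
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                list_for_each_entry(bo, &man->lru[i], lru)
                        if (try_reserve(bo))
                                return bo;

        return NULL;
}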
if ((bo->mem.placement & swap_placement) != swap_placement) { + if (bo->mem.mem_type != TTM_PL_SYSTEM || + bo->ttm->caching_state != tt_cached) { struct ttm_mem_reg evict_mem; evict_mem = bo->mem; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index a0fd3e66bc4b..75b708b36890 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -156,7 +156,8 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, const struct drm_display_mode *mode) { struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id]; + struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); u32 val; int fifo_lines; int vblank_lines; @@ -272,9 +273,7 @@ int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id, int *max_error, struct timeval *vblank_time, unsigned flags) { - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id]; - struct drm_crtc *crtc = &vc4_crtc->base; + struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id); struct drm_crtc_state *state = crtc->state; /* Helper routine in DRM core does all the work: */ @@ -652,8 +651,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id) { - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id]; + struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); CRTC_WRITE(PV_INTEN, PV_INT_VFP_START); @@ -662,8 +661,8 @@ int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id) void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id) { - struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id]; + struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); CRTC_WRITE(PV_INTEN, 0); } @@ -937,7 +936,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_crtc *vc4_crtc; struct drm_crtc *crtc; struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp; @@ -975,7 +973,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) &vc4_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); primary_plane->crtc = crtc; - vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc; vc4_crtc->channel = vc4_crtc->data->hvs_channel; drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index caf817bac885..c4d5e6a8d6f2 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -36,9 +36,3 @@ vc4_debugfs_init(struct drm_minor *minor) return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor->debugfs_root, minor); } - -void -vc4_debugfs_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES, minor); -} diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index ac09ca7ff430..e4f42ebee517 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -145,7 +145,6 @@ static struct drm_driver vc4_drm_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = vc4_debugfs_init, - 
.debugfs_cleanup = vc4_debugfs_cleanup, #endif .gem_create_object = vc4_create_object, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 489956efbff8..78e3e5a43fb0 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -16,7 +16,6 @@ struct vc4_dev { struct vc4_hdmi *hdmi; struct vc4_hvs *hvs; - struct vc4_crtc *crtc[3]; struct vc4_v3d *v3d; struct vc4_dpi *dpi; struct vc4_vec *vec; @@ -458,7 +457,6 @@ int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id, /* vc4_debugfs.c */ int vc4_debugfs_init(struct drm_minor *minor); -void vc4_debugfs_cleanup(struct drm_minor *minor); /* vc4_drv.c */ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index c4cb2e26de32..93d5994f3a04 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -356,15 +356,11 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) return; } - if (vc4_encoder->rgb_range_selectable) { - if (vc4_encoder->limited_rgb_range) { - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_LIMITED; - } else { - frame.avi.quantization_range = - HDMI_QUANTIZATION_RANGE_FULL; - } - } + drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, + vc4_encoder->limited_rgb_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL, + vc4_encoder->rgb_range_selectable); vc4_hdmi_write_infoframe(encoder, &frame); } @@ -463,7 +459,9 @@ static void vc4_hdmi_encoder_mode_set(struct drm_encoder *encoder, csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, VC4_HD_CSC_CTL_ORDER); - if (vc4_encoder->hdmi_monitor && drm_match_cea_mode(mode) > 1) { + if (vc4_encoder->hdmi_monitor && + drm_default_rgb_quant_range(mode) == + HDMI_QUANTIZATION_RANGE_LIMITED) { /* CEA VICs other than #1 require limited range RGB * output unless overridden by an AVI infoframe. 
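Both vc4 hunks above converge on DRM core helpers instead of open-coding "is this a CEA mode other than VIC 1". drm_default_rgb_quant_range() and drm_hdmi_avi_infoframe_quant_range() are the real helpers this series introduces; a sketch of the predicate the mode_set path now relies on (hypothetical wrapper name):

static bool vc4_mode_wants_limited_range(const struct drm_display_mode *mode)
{
        /* VIC 1 (640x480) defaults to full range; other CEA VICs to limited */
        return drm_default_rgb_quant_range(mode) ==
               HDMI_QUANTIZATION_RANGE_LIMITED;
}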
* Apply a colorspace conversion to squash 0-255 down diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 1f8798ad329c..cb59c7ab98b9 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -31,6 +31,7 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include <drm/drm_cache.h> #include <uapi/drm/vgem_drm.h> diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index da25dfe7b80e..3109c8308eb5 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -190,12 +190,12 @@ int vgem_fence_attach_ioctl(struct drm_device *dev, /* Expose the fence via the dma-buf */ ret = 0; - ww_mutex_lock(&resv->lock, NULL); + reservation_object_lock(resv, NULL); if (arg->flags & VGEM_FENCE_WRITE) reservation_object_add_excl_fence(resv, fence); else if ((ret = reservation_object_reserve_shared(resv)) == 0) reservation_object_add_shared_fence(resv, fence); - ww_mutex_unlock(&resv->lock); + reservation_object_unlock(resv); /* Record the fence in our idr for later signaling */ if (ret == 0) { diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index 61254b991265..24f99fc9d8a4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->screen_base = obj->vmap; + info->screen_buffer = obj->vmap; info->screen_size = obj->gem_base.size; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 63b3d5d35cf6..9cc7079f7aca 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -386,6 +386,7 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, } static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo, + bool evict, struct ttm_mem_reg *new_mem) { struct virtio_gpu_object *bo; @@ -433,8 +434,6 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = { .io_mem_free = &virtio_gpu_ttm_io_mem_free, .move_notify = &virtio_gpu_bo_move_notify, .swap_notify = &virtio_gpu_bo_swap_notify, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h index 531d22025fec..babe7cb84fc2 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -980,6 +980,8 @@ svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level) size.width = max_t(u32, base_level.width >> mip_level, 1); size.height = max_t(u32, base_level.height >> mip_level, 1); size.depth = max_t(u32, base_level.depth >> mip_level, 1); + size.pad64 = 0; + return size; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index c894a48a74a6..4c7f24a67a2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -825,6 +825,7 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) * (currently only resources). 
*/ static void vmw_move_notify(struct ttm_buffer_object *bo, + bool evict, struct ttm_mem_reg *mem) { vmw_resource_move_notify(bo, mem); @@ -839,7 +840,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo, */ static void vmw_swap_notify(struct ttm_buffer_object *bo) { - ttm_bo_wait(bo, false, false); + (void) ttm_bo_wait(bo, false, false); } @@ -858,6 +859,4 @@ struct ttm_bo_driver vmw_bo_driver = { .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, .io_mem_free = &vmw_ttm_io_mem_free, - .lru_tail = &ttm_bo_default_lru_tail, - .swap_lru_tail = &ttm_bo_default_swap_lru_tail, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index be35385bb26c..541a5887dd6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1293,7 +1293,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv) */ void vmw_svga_enable(struct vmw_private *dev_priv) { - ttm_read_lock(&dev_priv->reservation_sem, false); + (void) ttm_read_lock(&dev_priv->reservation_sem, false); __vmw_svga_enable(dev_priv); ttm_read_unlock(&dev_priv->reservation_sem); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 867a8442220c..e9005b9a5e8c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -482,8 +482,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info) mode_cmd.height = var->yres; mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; mode_cmd.pixel_format = - drm_mode_legacy_fb_format(var->bits_per_pixel, - ((var->bits_per_pixel + 7) / 8) * mode_cmd.width); + drm_mode_legacy_fb_format(var->bits_per_pixel, depth); cur_fb = par->set_fb; if (cur_fb && cur_fb->width == mode_cmd.width && diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index cf22110e9eee..d492d57d5309 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -757,7 +757,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev, struct vmw_surface **srf_out) { uint32_t format; - struct drm_vmw_size content_base_size; + struct drm_vmw_size content_base_size = {0}; struct vmw_resource *res; unsigned int bytes_pp; struct drm_format_name_buf format_name; @@ -1671,7 +1671,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem * 2. 
Total pixels (assuming 32bpp) must be < prim_bb_mem */ - u64 bb_mem = bounding_box.w * bounding_box.h * 4; + u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4; u64 pixel_mem = total_pixels * 4; if (bb_mem > dev_priv->prim_bb_mem) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index b6126a5f1269..941bcfd131ff 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -319,18 +319,17 @@ int vmw_otables_setup(struct vmw_private *dev_priv) int ret; if (dev_priv->has_dx) { - *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL); + *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); if (*otables == NULL) return -ENOMEM; - memcpy(*otables, dx_tables, sizeof(dx_tables)); dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); } else { - *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL); + *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), + GFP_KERNEL); if (*otables == NULL) return -ENOMEM; - memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables)); dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 8e86d6d4141b..65b3f0369636 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1760,7 +1760,7 @@ void vmw_resource_unpin(struct vmw_resource *res) struct vmw_private *dev_priv = res->dev_priv; int ret; - ttm_read_lock(&dev_priv->reservation_sem, false); + (void) ttm_read_lock(&dev_priv->reservation_sem, false); mutex_lock(&dev_priv->cmdbuf_mutex); ret = vmw_resource_reserve(res, false, true); @@ -1770,7 +1770,7 @@ void vmw_resource_unpin(struct vmw_resource *res) if (--res->pin_count == 0 && res->backup) { struct vmw_dma_buffer *vbo = res->backup; - ttm_bo_reserve(&vbo->base, false, false, NULL); + (void) ttm_bo_reserve(&vbo->base, false, false, NULL); vmw_bo_pin_reserved(vbo, false); ttm_bo_unreserve(&vbo->base); } diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig index 4065b2840f1c..5b36421ef3e5 100644 --- a/drivers/gpu/drm/zte/Kconfig +++ b/drivers/gpu/drm/zte/Kconfig @@ -4,5 +4,7 @@ config DRM_ZTE select DRM_KMS_CMA_HELPER select DRM_KMS_FB_HELPER select DRM_KMS_HELPER + select SND_SOC_HDMI_CODEC if SND_SOC + select VIDEOMODE_HELPERS help Choose this option to enable DRM on ZTE ZX SoCs. 
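The one-character vmwgfx fix above, the (u64) cast on bounding_box.w, deserves a worked example: with two 32-bit operands the multiplication itself is performed in 32 bits, and only the already-truncated product is widened to u64. Plain-C demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t w = 32768, h = 32768;          /* 32768 * 32768 * 4 == 2^32 */

        uint64_t wrong = w * h * 4;             /* 32-bit wraparound: 0 */
        uint64_t right = (uint64_t)w * h * 4;   /* 64-bit math: 4294967296 */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}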
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile index 699180bfd57c..01352b56c418 100644 --- a/drivers/gpu/drm/zte/Makefile +++ b/drivers/gpu/drm/zte/Makefile @@ -2,6 +2,7 @@ zxdrm-y := \ zx_drm_drv.o \ zx_hdmi.o \ zx_plane.o \ + zx_tvenc.o \ zx_vou.o obj-$(CONFIG_DRM_ZTE) += zxdrm.o diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c index 3e76f72c92ff..13081fed902d 100644 --- a/drivers/gpu/drm/zte/zx_drm_drv.c +++ b/drivers/gpu/drm/zte/zx_drm_drv.c @@ -247,6 +247,7 @@ static struct platform_driver zx_drm_platform_driver = { static struct platform_driver *drivers[] = { &zx_crtc_driver, &zx_hdmi_driver, + &zx_tvenc_driver, &zx_drm_platform_driver, }; diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h index e65cd18a6cba..5ca035b079c7 100644 --- a/drivers/gpu/drm/zte/zx_drm_drv.h +++ b/drivers/gpu/drm/zte/zx_drm_drv.h @@ -13,6 +13,7 @@ extern struct platform_driver zx_crtc_driver; extern struct platform_driver zx_hdmi_driver; +extern struct platform_driver zx_tvenc_driver; static inline u32 zx_readl(void __iomem *reg) { diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index 6bf6c364811e..c47b9cbfe270 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -25,6 +25,8 @@ #include <drm/drm_of.h> #include <drm/drmP.h> +#include <sound/hdmi-codec.h> + #include "zx_hdmi_regs.h" #include "zx_vou.h" @@ -49,17 +51,11 @@ struct zx_hdmi { bool sink_is_hdmi; bool sink_has_audio; const struct vou_inf *inf; + struct platform_device *audio_pdev; }; #define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x) -static const struct vou_inf vou_inf_hdmi = { - .id = VOU_HDMI, - .data_sel = VOU_YUV444, - .clocks_en_bits = BIT(24) | BIT(18) | BIT(6), - .clocks_sel_bits = BIT(13) | BIT(2), -}; - static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset) { return readl_relaxed(hdmi->mmio + offset * 4); @@ -238,14 +234,14 @@ static void zx_hdmi_encoder_enable(struct drm_encoder *encoder) zx_hdmi_hw_enable(hdmi); - vou_inf_enable(hdmi->inf, encoder->crtc); + vou_inf_enable(VOU_HDMI, encoder->crtc); } static void zx_hdmi_encoder_disable(struct drm_encoder *encoder) { struct zx_hdmi *hdmi = to_zx_hdmi(encoder); - vou_inf_disable(hdmi->inf, encoder->crtc); + vou_inf_disable(VOU_HDMI, encoder->crtc); zx_hdmi_hw_disable(hdmi); @@ -366,6 +362,142 @@ static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id) return IRQ_NONE; } +static int zx_hdmi_audio_startup(struct device *dev, void *data) +{ + struct zx_hdmi *hdmi = dev_get_drvdata(dev); + struct drm_encoder *encoder = &hdmi->encoder; + + vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF); + + return 0; +} + +static void zx_hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct zx_hdmi *hdmi = dev_get_drvdata(dev); + + /* Disable audio input */ + hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0); +} + +static inline int zx_hdmi_audio_get_n(unsigned int fs) +{ + unsigned int n; + + if (fs && (fs % 44100) == 0) + n = 6272 * (fs / 44100); + else + n = fs * 128 / 1000; + + return n; +} + +static int zx_hdmi_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct zx_hdmi *hdmi = dev_get_drvdata(dev); + struct hdmi_audio_infoframe *cea = &params->cea; + union hdmi_infoframe frame; + int n; + + /* We only support spdif for now */ + if (daifmt->fmt != HDMI_SPDIF) { + DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt); + return -EINVAL; + } + + switch 
(params->sample_width) { + case 16: + hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, + SPDIF_SAMPLE_SIZE_16BIT); + break; + case 20: + hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, + SPDIF_SAMPLE_SIZE_20BIT); + break; + case 24: + hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK, + SPDIF_SAMPLE_SIZE_24BIT); + break; + default: + DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n", + params->sample_width); + return -EINVAL; + } + + /* CTS is calculated by hardware, and we only need to take care of N */ + n = zx_hdmi_audio_get_n(params->sample_rate); + hdmi_writeb(hdmi, N_SVAL1, n & 0xff); + hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff); + hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf); + + /* Enable spdif mode */ + hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN); + + /* Enable audio input */ + hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN); + + memcpy(&frame.audio, cea, sizeof(*cea)); + + return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO); +} + +static int zx_hdmi_audio_digital_mute(struct device *dev, void *data, + bool enable) +{ + struct zx_hdmi *hdmi = dev_get_drvdata(dev); + + if (enable) + hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, + TPI_AUD_MUTE); + else + hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0); + + return 0; +} + +static int zx_hdmi_audio_get_eld(struct device *dev, void *data, + uint8_t *buf, size_t len) +{ + struct zx_hdmi *hdmi = dev_get_drvdata(dev); + struct drm_connector *connector = &hdmi->connector; + + memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops zx_hdmi_codec_ops = { + .audio_startup = zx_hdmi_audio_startup, + .hw_params = zx_hdmi_audio_hw_params, + .audio_shutdown = zx_hdmi_audio_shutdown, + .digital_mute = zx_hdmi_audio_digital_mute, + .get_eld = zx_hdmi_audio_get_eld, +}; + +static struct hdmi_codec_pdata zx_hdmi_codec_pdata = { + .ops = &zx_hdmi_codec_ops, + .spdif = 1, +}; + +static int zx_hdmi_audio_register(struct zx_hdmi *hdmi) +{ + struct platform_device *pdev; + + pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &zx_hdmi_codec_pdata, + sizeof(zx_hdmi_codec_pdata)); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + hdmi->audio_pdev = pdev; + + return 0; +} + static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg) { int len = msg->len; @@ -523,7 +655,6 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data) hdmi->dev = dev; hdmi->drm = drm; - hdmi->inf = &vou_inf_hdmi; dev_set_drvdata(dev, hdmi); @@ -566,6 +697,12 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data) return ret; } + ret = zx_hdmi_audio_register(hdmi); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret); + return ret; + } + ret = zx_hdmi_register(drm, hdmi); if (ret) { DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret); @@ -590,6 +727,9 @@ static void zx_hdmi_unbind(struct device *dev, struct device *master, hdmi->connector.funcs->destroy(&hdmi->connector); hdmi->encoder.funcs->destroy(&hdmi->encoder); + + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); } static const struct component_ops zx_hdmi_component_ops = { diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h index de911f66b658..c6d5d8211725 100644 --- a/drivers/gpu/drm/zte/zx_hdmi_regs.h +++ b/drivers/gpu/drm/zte/zx_hdmi_regs.h @@ -52,5 +52,19 @@ #define TPI_INFO_TRANS_RPT BIT(6) #define 
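/*
 * (The audio support above uses the standard ASoC hdmi-codec bridge:
 * registering a HDMI_CODEC_DRV_NAME platform device carrying
 * zx_hdmi_codec_pdata makes the generic hdmi-codec driver bind against it
 * and drive the zx_hdmi_codec_ops callbacks, so the DRM driver never has
 * to talk to ALSA directly.)
 */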
TPI_DDC_MASTER_EN 0x06f8 #define HW_DDC_MASTER BIT(7) +#define N_SVAL1 0xa03 +#define N_SVAL2 0xa04 +#define N_SVAL3 0xa05 +#define AUD_EN 0xa13 +#define AUD_IN_EN BIT(0) +#define AUD_MODE 0xa14 +#define SPDIF_EN BIT(1) +#define TPI_AUD_CONFIG 0xa62 +#define SPDIF_SAMPLE_SIZE_SHIFT 6 +#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT) +#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT) +#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT) +#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT) +#define TPI_AUD_MUTE BIT(4) #endif /* __ZX_HDMI_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index b634b090cdc1..1d08ba381098 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -21,16 +21,6 @@ #include "zx_plane_regs.h" #include "zx_vou.h" -struct zx_plane { - struct drm_plane plane; - void __iomem *layer; - void __iomem *csc; - void __iomem *hbsc; - void __iomem *rsz; -}; - -#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane) - static const uint32_t gl_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, @@ -40,6 +30,261 @@ static const uint32_t gl_formats[] = { DRM_FORMAT_ARGB4444, }; +static const uint32_t vl_formats[] = { + DRM_FORMAT_NV12, /* Semi-planar YUV420 */ + DRM_FORMAT_YUV420, /* Planar YUV420 */ + DRM_FORMAT_YUYV, /* Packed YUV422 */ + DRM_FORMAT_YVYU, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, + DRM_FORMAT_YUV444, /* YUV444 8bit */ + /* + * TODO: add formats below that HW supports: + * - YUV420 P010 + * - YUV420 Hantro + * - YUV444 10bit + */ +}; + +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + +static int zx_vl_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_framebuffer *fb = plane_state->fb; + struct drm_crtc *crtc = plane_state->crtc; + struct drm_crtc_state *crtc_state; + struct drm_rect clip; + int min_scale = FRAC_16_16(1, 8); + int max_scale = FRAC_16_16(8, 1); + + if (!crtc || !fb) + return 0; + + crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + /* nothing to check when disabling or disabled */ + if (!crtc_state->enable) + return 0; + + /* plane must be enabled */ + if (!plane_state->crtc) + return -EINVAL; + + clip.x1 = 0; + clip.y1 = 0; + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + + return drm_plane_helper_check_state(plane_state, &clip, + min_scale, max_scale, + true, true); +} + +static int zx_vl_get_fmt(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_NV12: + return VL_FMT_YUV420; + case DRM_FORMAT_YUV420: + return VL_YUV420_PLANAR | VL_FMT_YUV420; + case DRM_FORMAT_YUYV: + return VL_YUV422_YUYV | VL_FMT_YUV422; + case DRM_FORMAT_YVYU: + return VL_YUV422_YVYU | VL_FMT_YUV422; + case DRM_FORMAT_UYVY: + return VL_YUV422_UYVY | VL_FMT_YUV422; + case DRM_FORMAT_VYUY: + return VL_YUV422_VYUY | VL_FMT_YUV422; + case DRM_FORMAT_YUV444: + return VL_FMT_YUV444_8BIT; + default: + WARN_ONCE(1, "invalid pixel format %d\n", format); + return -EINVAL; + } +} + +static inline void zx_vl_set_update(struct zx_plane *zplane) +{ + void __iomem *layer = zplane->layer; + + zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE); +} + +static inline void zx_vl_rsz_set_update(struct zx_plane *zplane) +{ + zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1); +} + +static int zx_vl_rsz_get_fmt(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_NV12: + case 
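/*
 * On the scale limits in zx_vl_plane_atomic_check() above:
 * drm_plane_helper_check_state() takes src/dst scaling factors in 16.16
 * fixed point, so the pair FRAC_16_16(1, 8) = 0x2000 and
 * FRAC_16_16(8, 1) = 0x80000 constrains the video-layer resizer to
 * between 8x upscaling and 8x downscaling.
 */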
DRM_FORMAT_YUV420: + return RSZ_VL_FMT_YCBCR420; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return RSZ_VL_FMT_YCBCR422; + case DRM_FORMAT_YUV444: + return RSZ_VL_FMT_YCBCR444; + default: + WARN_ONCE(1, "invalid pixel format %d\n", format); + return -EINVAL; + } +} + +static inline u32 rsz_step_value(u32 src, u32 dst) +{ + u32 val = 0; + + if (src == dst) + val = 0; + else if (src < dst) + val = RSZ_PARA_STEP((src << 16) / dst); + else if (src > dst) + val = RSZ_DATA_STEP(src / dst) | + RSZ_PARA_STEP(((src << 16) / dst) & 0xffff); + + return val; +} + +static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format, + u32 src_w, u32 src_h, u32 dst_w, u32 dst_h) +{ + void __iomem *rsz = zplane->rsz; + u32 src_chroma_w = src_w; + u32 src_chroma_h = src_h; + u32 fmt; + + /* Set up source and destination resolution */ + zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1)); + zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1)); + + /* Configure data format for VL RSZ */ + fmt = zx_vl_rsz_get_fmt(format); + if (fmt >= 0) + zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt); + + /* Calculate Chroma height and width */ + if (fmt == RSZ_VL_FMT_YCBCR420) { + src_chroma_w = src_w >> 1; + src_chroma_h = src_h >> 1; + } else if (fmt == RSZ_VL_FMT_YCBCR422) { + src_chroma_w = src_w >> 1; + } + + /* Set up Luma and Chroma step registers */ + zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w)); + zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h)); + zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w)); + zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h)); + + zx_vl_rsz_set_update(zplane); +} + +static void zx_vl_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct zx_plane *zplane = to_zx_plane(plane); + struct drm_plane_state *state = plane->state; + struct drm_framebuffer *fb = state->fb; + struct drm_rect *src = &state->src; + struct drm_rect *dst = &state->dst; + struct drm_gem_cma_object *cma_obj; + void __iomem *layer = zplane->layer; + void __iomem *hbsc = zplane->hbsc; + void __iomem *paddr_reg; + dma_addr_t paddr; + u32 src_x, src_y, src_w, src_h; + u32 dst_x, dst_y, dst_w, dst_h; + uint32_t format; + u32 fmt; + int num_planes; + int i; + + if (!fb) + return; + + format = fb->format->format; + + src_x = src->x1 >> 16; + src_y = src->y1 >> 16; + src_w = drm_rect_width(src) >> 16; + src_h = drm_rect_height(src) >> 16; + + dst_x = dst->x1; + dst_y = dst->y1; + dst_w = drm_rect_width(dst); + dst_h = drm_rect_height(dst); + + /* Set up data address registers for Y, Cb and Cr planes */ + num_planes = drm_format_num_planes(format); + paddr_reg = layer + VL_Y; + for (i = 0; i < num_planes; i++) { + cma_obj = drm_fb_cma_get_gem_obj(fb, i); + paddr = cma_obj->paddr + fb->offsets[i]; + paddr += src_y * fb->pitches[i]; + paddr += src_x * drm_format_plane_cpp(format, i); + zx_writel(paddr_reg, paddr); + paddr_reg += 4; + } + + /* Set up source height/width register */ + zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h)); + + /* Set up start position register */ + zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y)); + + /* Set up end position register */ + zx_writel(layer + VL_POS_END, + GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h)); + + /* Strides of Cb and Cr planes should be identical */ + zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) | + 
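/*
 * Worked example for rsz_step_value() above, which packs the resize
 * ratio as an integer step (RSZ_DATA_STEP) plus a 16.16 fractional step
 * (RSZ_PARA_STEP):
 *
 *   src 1920 -> dst 1280 (downscale):
 *     (1920 << 16) / 1280 = 0x18000, so the register gets
 *     RSZ_DATA_STEP(1) | RSZ_PARA_STEP(0x8000), i.e. 1.5 source pixels
 *     advanced per output pixel;
 *   src 640 -> dst 1280 (upscale):
 *     RSZ_PARA_STEP(0x8000) alone, i.e. a step of 0.5.
 */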
CHROMA_STRIDE(fb->pitches[1])); + + /* Set up video layer data format */ + fmt = zx_vl_get_fmt(format); + if (fmt >= 0) + zx_writel(layer + VL_CTRL1, fmt); + + /* Always use scaler since it exists (set for not bypass) */ + zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE, + VL_SCALER_BYPASS_MODE); + + zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h); + + /* Enable HBSC block */ + zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN); + + zx_vou_layer_enable(plane); + + zx_vl_set_update(zplane); +} + +static void zx_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct zx_plane *zplane = to_zx_plane(plane); + void __iomem *hbsc = zplane->hbsc; + + zx_vou_layer_disable(plane); + + /* Disable HBSC block */ + zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0); +} + +static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = { + .atomic_check = zx_vl_plane_atomic_check, + .atomic_update = zx_vl_plane_atomic_update, + .atomic_disable = zx_plane_atomic_disable, +}; + static int zx_gl_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *plane_state) { @@ -107,14 +352,6 @@ static inline void zx_gl_rsz_set_update(struct zx_plane *zplane) zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1); } -void zx_plane_set_update(struct drm_plane *plane) -{ - struct zx_plane *zplane = to_zx_plane(plane); - - zx_gl_rsz_set_update(zplane); - zx_gl_set_update(zplane); -} - static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h, u32 dst_w, u32 dst_h) { @@ -207,12 +444,15 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane, /* Enable HBSC block */ zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN); + zx_vou_layer_enable(plane); + zx_gl_set_update(zplane); } static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = { .atomic_check = zx_gl_plane_atomic_check, .atomic_update = zx_gl_plane_atomic_update, + .atomic_disable = zx_plane_atomic_disable, }; static void zx_plane_destroy(struct drm_plane *plane) @@ -230,6 +470,28 @@ static const struct drm_plane_funcs zx_plane_funcs = { .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; +void zx_plane_set_update(struct drm_plane *plane) +{ + struct zx_plane *zplane = to_zx_plane(plane); + + /* Do nothing if the plane is not enabled */ + if (!plane->state->crtc) + return; + + switch (plane->type) { + case DRM_PLANE_TYPE_PRIMARY: + zx_gl_rsz_set_update(zplane); + zx_gl_set_update(zplane); + break; + case DRM_PLANE_TYPE_OVERLAY: + zx_vl_rsz_set_update(zplane); + zx_vl_set_update(zplane); + break; + default: + WARN_ONCE(1, "unsupported plane type %d\n", plane->type); + } +} + static void zx_plane_hbsc_init(struct zx_plane *zplane) { void __iomem *hbsc = zplane->hbsc; @@ -248,28 +510,16 @@ static void zx_plane_hbsc_init(struct zx_plane *zplane) zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40); } -struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev, - struct zx_layer_data *data, - enum drm_plane_type type) +int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane, + enum drm_plane_type type) { const struct drm_plane_helper_funcs *helper; - struct zx_plane *zplane; - struct drm_plane *plane; + struct drm_plane *plane = &zplane->plane; + struct device *dev = zplane->dev; const uint32_t *formats; unsigned int format_count; int ret; - zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL); - if (!zplane) - return ERR_PTR(-ENOMEM); - - plane = &zplane->plane; - - zplane->layer = data->layer; - 
zplane->hbsc = data->hbsc; - zplane->csc = data->csc; - zplane->rsz = data->rsz; - zx_plane_hbsc_init(zplane); switch (type) { @@ -279,10 +529,12 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev, format_count = ARRAY_SIZE(gl_formats); break; case DRM_PLANE_TYPE_OVERLAY: - /* TODO: add video layer (vl) support */ + helper = &zx_vl_plane_helper_funcs; + formats = vl_formats; + format_count = ARRAY_SIZE(vl_formats); break; default: - return ERR_PTR(-ENODEV); + return -ENODEV; } ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK, @@ -290,10 +542,10 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev, type, NULL); if (ret) { DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret); - return ERR_PTR(ret); + return ret; } drm_plane_helper_add(plane, helper); - return plane; + return 0; } diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h index 2b82cd558d9d..933611ddffd0 100644 --- a/drivers/gpu/drm/zte/zx_plane.h +++ b/drivers/gpu/drm/zte/zx_plane.h @@ -11,16 +11,20 @@ #ifndef __ZX_PLANE_H__ #define __ZX_PLANE_H__ -struct zx_layer_data { +struct zx_plane { + struct drm_plane plane; + struct device *dev; void __iomem *layer; void __iomem *csc; void __iomem *hbsc; void __iomem *rsz; + const struct vou_layer_bits *bits; }; -struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev, - struct zx_layer_data *data, - enum drm_plane_type type); +#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane) + +int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane, + enum drm_plane_type type); void zx_plane_set_update(struct drm_plane *plane); #endif /* __ZX_PLANE_H__ */ diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h index 3dde6716a558..65f271aeabed 100644 --- a/drivers/gpu/drm/zte/zx_plane_regs.h +++ b/drivers/gpu/drm/zte/zx_plane_regs.h @@ -46,6 +46,37 @@ #define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK) #define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK) +/* VL registers */ +#define VL_CTRL0 0x00 +#define VL_UPDATE BIT(3) +#define VL_CTRL1 0x04 +#define VL_YUV420_PLANAR BIT(5) +#define VL_YUV422_SHIFT 3 +#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT) +#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT) +#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT) +#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT) +#define VL_FMT_YUV420 0 +#define VL_FMT_YUV422 1 +#define VL_FMT_YUV420_P010 2 +#define VL_FMT_YUV420_HANTRO 3 +#define VL_FMT_YUV444_8BIT 4 +#define VL_FMT_YUV444_10BIT 5 +#define VL_CTRL2 0x08 +#define VL_SCALER_BYPASS_MODE BIT(0) +#define VL_STRIDE 0x0c +#define LUMA_STRIDE_SHIFT 16 +#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT) +#define CHROMA_STRIDE_SHIFT 0 +#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT) +#define VL_SRC_SIZE 0x10 +#define VL_Y 0x14 +#define VL_POS_START 0x30 +#define VL_POS_END 0x34 + +#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK) +#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK) + /* CSC registers */ #define CSC_CTRL0 0x30 #define CSC_COV_MODE_SHIFT 16 @@ -69,6 +100,18 @@ #define RSZ_DEST_CFG 0x04 #define RSZ_ENABLE_CFG 0x14 +#define RSZ_VL_LUMA_HOR 0x08 +#define RSZ_VL_LUMA_VER 0x0c +#define RSZ_VL_CHROMA_HOR 0x10 +#define RSZ_VL_CHROMA_VER 0x14 +#define RSZ_VL_CTRL_CFG 0x18 +#define RSZ_VL_FMT_SHIFT 3 +#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT) +#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT) +#define 
RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT) +#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT) +#define RSZ_VL_ENABLE_CFG 0x1c + #define RSZ_VER_SHIFT 16 #define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT) #define RSZ_HOR_SHIFT 0 @@ -77,6 +120,14 @@ #define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK) #define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK) +#define RSZ_DATA_STEP_SHIFT 16 +#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT) +#define RSZ_PARA_STEP_SHIFT 0 +#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT) + +#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK) +#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK) + /* HBSC registers */ #define HBSC_SATURATION 0x00 #define HBSC_HUE 0x04 diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c new file mode 100644 index 000000000000..b56dc69843fc --- /dev/null +++ b/drivers/gpu/drm/zte/zx_tvenc.c @@ -0,0 +1,407 @@ +/* + * Copyright 2017 Linaro Ltd. + * Copyright 2017 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drmP.h> + +#include "zx_drm_drv.h" +#include "zx_tvenc_regs.h" +#include "zx_vou.h" + +struct zx_tvenc_pwrctrl { + struct regmap *regmap; + u32 reg; + u32 mask; +}; + +struct zx_tvenc { + struct drm_connector connector; + struct drm_encoder encoder; + struct device *dev; + void __iomem *mmio; + const struct vou_inf *inf; + struct zx_tvenc_pwrctrl pwrctrl; +}; + +#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x) + +struct zx_tvenc_mode { + struct drm_display_mode mode; + u32 video_info; + u32 video_res; + u32 field1_param; + u32 field2_param; + u32 burst_line_odd1; + u32 burst_line_even1; + u32 burst_line_odd2; + u32 burst_line_even2; + u32 line_timing_param; + u32 weight_value; + u32 blank_black_level; + u32 burst_level; + u32 control_param; + u32 sub_carrier_phase1; + u32 phase_line_incr_cvbs; +}; + +/* + * The CRM cannot directly provide a suitable frequency, and we have to + * ask a multiplied rate from CRM and use the divider in VOU to get the + * desired one. 
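 *
 * Concretely, for the PAL/NTSC tables below the target pixel clock is the
 * standard 13.5 MHz, so the mode advertises 13500 * 4 = 54000 kHz to the
 * CRM and zx_tvenc_encoder_mode_set() programs VOU_DIV_INF to VOU_DIV_4
 * to divide it back down.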
+ */ +#define TVENC_CLOCK_MULTIPLIER 4 + +static const struct zx_tvenc_mode tvenc_mode_pal = { + .mode = { + .clock = 13500 * TVENC_CLOCK_MULTIPLIER, + .hdisplay = 720, + .hsync_start = 720 + 12, + .hsync_end = 720 + 12 + 2, + .htotal = 720 + 12 + 2 + 130, + .vdisplay = 576, + .vsync_start = 576 + 2, + .vsync_end = 576 + 2 + 2, + .vtotal = 576 + 2 + 2 + 20, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE, + }, + .video_info = 0x00040040, + .video_res = 0x05a9c760, + .field1_param = 0x0004d416, + .field2_param = 0x0009b94f, + .burst_line_odd1 = 0x0004d406, + .burst_line_even1 = 0x0009b53e, + .burst_line_odd2 = 0x0004d805, + .burst_line_even2 = 0x0009b93f, + .line_timing_param = 0x06a96fdf, + .weight_value = 0x00c188a0, + .blank_black_level = 0x0000fcfc, + .burst_level = 0x00001595, + .control_param = 0x00000001, + .sub_carrier_phase1 = 0x1504c566, + .phase_line_incr_cvbs = 0xc068db8c, +}; + +static const struct zx_tvenc_mode tvenc_mode_ntsc = { + .mode = { + .clock = 13500 * TVENC_CLOCK_MULTIPLIER, + .hdisplay = 720, + .hsync_start = 720 + 16, + .hsync_end = 720 + 16 + 2, + .htotal = 720 + 16 + 2 + 120, + .vdisplay = 480, + .vsync_start = 480 + 3, + .vsync_end = 480 + 3 + 2, + .vtotal = 480 + 3 + 2 + 17, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | + DRM_MODE_FLAG_INTERLACE, + }, + .video_info = 0x00040080, + .video_res = 0x05a8375a, + .field1_param = 0x00041817, + .field2_param = 0x0008351e, + .burst_line_odd1 = 0x00041006, + .burst_line_even1 = 0x0008290d, + .burst_line_odd2 = 0x00000000, + .burst_line_even2 = 0x00000000, + .line_timing_param = 0x06a8ef9e, + .weight_value = 0x00b68197, + .blank_black_level = 0x0000f0f0, + .burst_level = 0x0000009c, + .control_param = 0x00000001, + .sub_carrier_phase1 = 0x10f83e10, + .phase_line_incr_cvbs = 0x80000000, +}; + +static const struct zx_tvenc_mode *tvenc_modes[] = { + &tvenc_mode_pal, + &tvenc_mode_ntsc, +}; + +static const struct zx_tvenc_mode * +zx_tvenc_find_zmode(struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) { + const struct zx_tvenc_mode *zmode = tvenc_modes[i]; + + if (drm_mode_equal(mode, &zmode->mode)) + return zmode; + } + + return NULL; +} + +static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + struct zx_tvenc *tvenc = to_zx_tvenc(encoder); + const struct zx_tvenc_mode *zmode; + struct vou_div_config configs[] = { + { VOU_DIV_INF, VOU_DIV_4 }, + { VOU_DIV_TVENC, VOU_DIV_1 }, + { VOU_DIV_LAYER, VOU_DIV_2 }, + }; + + zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs)); + + zmode = zx_tvenc_find_zmode(mode); + if (!zmode) { + DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n"); + return; + } + + zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info); + zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res); + zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param); + zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param); + zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1); + zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1); + zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2); + zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2); + zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM, + zmode->line_timing_param); + zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value); + zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL, + zmode->blank_black_level); + 
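	/*
	 * (These VENC_* writes, continued below, just dump the matched
	 * per-standard table into the encoder: the values in tvenc_mode_pal
	 * and tvenc_mode_ntsc above are opaque magic numbers, and PAL and
	 * NTSC are the only modes the driver supports, matched via
	 * drm_mode_equal() in zx_tvenc_find_zmode().)
	 */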
zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level); + zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param); + zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1, + zmode->sub_carrier_phase1); + zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS, + zmode->phase_line_incr_cvbs); +} + +static void zx_tvenc_encoder_enable(struct drm_encoder *encoder) +{ + struct zx_tvenc *tvenc = to_zx_tvenc(encoder); + struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; + + /* Set bit to power up TVENC DAC */ + regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, + pwrctrl->mask); + + vou_inf_enable(VOU_TV_ENC, encoder->crtc); + + zx_writel(tvenc->mmio + VENC_ENABLE, 1); +} + +static void zx_tvenc_encoder_disable(struct drm_encoder *encoder) +{ + struct zx_tvenc *tvenc = to_zx_tvenc(encoder); + struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; + + zx_writel(tvenc->mmio + VENC_ENABLE, 0); + + vou_inf_disable(VOU_TV_ENC, encoder->crtc); + + /* Clear bit to power down TVENC DAC */ + regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0); +} + +static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = { + .enable = zx_tvenc_encoder_enable, + .disable = zx_tvenc_encoder_disable, + .mode_set = zx_tvenc_encoder_mode_set, +}; + +static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int zx_tvenc_connector_get_modes(struct drm_connector *connector) +{ + struct zx_tvenc *tvenc = to_zx_tvenc(connector); + struct device *dev = tvenc->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) { + const struct zx_tvenc_mode *zmode = tvenc_modes[i]; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &zmode->mode); + if (!mode) { + DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n"); + continue; + } + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + } + + return i; +} + +static enum drm_mode_status +zx_tvenc_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct zx_tvenc *tvenc = to_zx_tvenc(connector); + const struct zx_tvenc_mode *zmode; + + zmode = zx_tvenc_find_zmode(mode); + if (!zmode) { + DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name); + return MODE_NOMODE; + } + + return MODE_OK; +} + +static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = { + .get_modes = zx_tvenc_connector_get_modes, + .mode_valid = zx_tvenc_connector_mode_valid, +}; + +static const struct drm_connector_funcs zx_tvenc_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc) +{ + struct drm_encoder *encoder = &tvenc->encoder; + struct drm_connector *connector = &tvenc->connector; + + /* + * The tvenc is designed to use aux channel, as there is a deflicker + * block for the channel. 
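 *
 * (BIT(1) below therefore pins the TV encoder to the aux channel's CRTC,
 * index 1, rather than exposing it on both CRTCs the way the planes do
 * with VOU_CRTC_MASK.)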
+ */ + encoder->possible_crtcs = BIT(1); + + drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs, + DRM_MODE_ENCODER_TVDAC, NULL); + drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs); + + connector->interlace_allowed = true; + + drm_connector_init(drm, connector, &zx_tvenc_connector_funcs, + DRM_MODE_CONNECTOR_Composite); + drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs); + + drm_mode_connector_attach_encoder(connector, encoder); + + return 0; +} + +static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc) +{ + struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl; + struct device *dev = tvenc->dev; + struct of_phandle_args out_args; + struct regmap *regmap; + int ret; + + ret = of_parse_phandle_with_fixed_args(dev->of_node, + "zte,tvenc-power-control", 2, 0, &out_args); + if (ret) + return ret; + + regmap = syscon_node_to_regmap(out_args.np); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto out; + } + + pwrctrl->regmap = regmap; + pwrctrl->reg = out_args.args[0]; + pwrctrl->mask = out_args.args[1]; + +out: + of_node_put(out_args.np); + return ret; +} + +static int zx_tvenc_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = data; + struct resource *res; + struct zx_tvenc *tvenc; + int ret; + + tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL); + if (!tvenc) + return -ENOMEM; + + tvenc->dev = dev; + dev_set_drvdata(dev, tvenc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + tvenc->mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(tvenc->mmio)) { + ret = PTR_ERR(tvenc->mmio); + DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret); + return ret; + } + + ret = zx_tvenc_pwrctrl_init(tvenc); + if (ret) { + DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret); + return ret; + } + + ret = zx_tvenc_register(drm, tvenc); + if (ret) { + DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret); + return ret; + } + + return 0; +} + +static void zx_tvenc_unbind(struct device *dev, struct device *master, + void *data) +{ + /* Nothing to do */ +} + +static const struct component_ops zx_tvenc_component_ops = { + .bind = zx_tvenc_bind, + .unbind = zx_tvenc_unbind, +}; + +static int zx_tvenc_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &zx_tvenc_component_ops); +} + +static int zx_tvenc_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &zx_tvenc_component_ops); + return 0; +} + +static const struct of_device_id zx_tvenc_of_match[] = { + { .compatible = "zte,zx296718-tvenc", }, + { /* end */ }, +}; +MODULE_DEVICE_TABLE(of, zx_tvenc_of_match); + +struct platform_driver zx_tvenc_driver = { + .probe = zx_tvenc_probe, + .remove = zx_tvenc_remove, + .driver = { + .name = "zx-tvenc", + .of_match_table = zx_tvenc_of_match, + }, +}; diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h new file mode 100644 index 000000000000..bd91f5dcc1f3 --- /dev/null +++ b/drivers/gpu/drm/zte/zx_tvenc_regs.h @@ -0,0 +1,31 @@ +/* + * Copyright 2017 Linaro Ltd. + * Copyright 2017 ZTE Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
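 *
 * (On the power-control hookup in zx_tvenc.c above: the driver parses a
 * two-cell phandle, so the board DT is expected to carry something like
 * zte,tvenc-power-control = <&sysctrl REG MASK>, where REG and MASK here
 * stand for board-specific values; syscon_node_to_regmap() turns the
 * target node into pwrctrl->regmap, and the two cells become
 * pwrctrl->reg and pwrctrl->mask, the register and bit(s) that gate the
 * TVENC DAC power.)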
+ * + */ + +#ifndef __ZX_TVENC_REGS_H__ +#define __ZX_TVENC_REGS_H__ + +#define VENC_VIDEO_INFO 0x04 +#define VENC_VIDEO_RES 0x08 +#define VENC_FIELD1_PARAM 0x10 +#define VENC_FIELD2_PARAM 0x14 +#define VENC_LINE_O_1 0x18 +#define VENC_LINE_E_1 0x1c +#define VENC_LINE_O_2 0x20 +#define VENC_LINE_E_2 0x24 +#define VENC_LINE_TIMING_PARAM 0x28 +#define VENC_WEIGHT_VALUE 0x2c +#define VENC_BLANK_BLACK_LEVEL 0x30 +#define VENC_BURST_LEVEL 0x34 +#define VENC_CONTROL_PARAM 0x3c +#define VENC_SUB_CARRIER_PHASE1 0x40 +#define VENC_PHASE_LINE_INCR_CVBS 0x48 +#define VENC_ENABLE 0xa8 + +#endif /* __ZX_TVENC_REGS_H__ */ diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c index a86e3a5852a2..cf92d675feaa 100644 --- a/drivers/gpu/drm/zte/zx_vou.c +++ b/drivers/gpu/drm/zte/zx_vou.c @@ -40,6 +40,7 @@ struct zx_crtc_regs { u32 fir_active; u32 fir_htiming; u32 fir_vtiming; + u32 sec_vtiming; u32 timing_shift; u32 timing_pi_shift; }; @@ -48,6 +49,7 @@ static const struct zx_crtc_regs main_crtc_regs = { .fir_active = FIR_MAIN_ACTIVE, .fir_htiming = FIR_MAIN_H_TIMING, .fir_vtiming = FIR_MAIN_V_TIMING, + .sec_vtiming = SEC_MAIN_V_TIMING, .timing_shift = TIMING_MAIN_SHIFT, .timing_pi_shift = TIMING_MAIN_PI_SHIFT, }; @@ -56,6 +58,7 @@ static const struct zx_crtc_regs aux_crtc_regs = { .fir_active = FIR_AUX_ACTIVE, .fir_htiming = FIR_AUX_H_TIMING, .fir_vtiming = FIR_AUX_V_TIMING, + .sec_vtiming = SEC_AUX_V_TIMING, .timing_shift = TIMING_AUX_SHIFT, .timing_pi_shift = TIMING_AUX_PI_SHIFT, }; @@ -65,7 +68,17 @@ struct zx_crtc_bits { u32 polarity_shift; u32 int_frame_mask; u32 tc_enable; - u32 gl_enable; + u32 sec_vactive_shift; + u32 sec_vactive_mask; + u32 interlace_select; + u32 pi_enable; + u32 div_vga_shift; + u32 div_pic_shift; + u32 div_tvenc_shift; + u32 div_hdmi_pnx_shift; + u32 div_hdmi_shift; + u32 div_inf_shift; + u32 div_layer_shift; }; static const struct zx_crtc_bits main_crtc_bits = { @@ -73,7 +86,17 @@ static const struct zx_crtc_bits main_crtc_bits = { .polarity_shift = MAIN_POL_SHIFT, .int_frame_mask = TIMING_INT_MAIN_FRAME, .tc_enable = MAIN_TC_EN, - .gl_enable = OSD_CTRL0_GL0_EN, + .sec_vactive_shift = SEC_VACT_MAIN_SHIFT, + .sec_vactive_mask = SEC_VACT_MAIN_MASK, + .interlace_select = MAIN_INTERLACE_SEL, + .pi_enable = MAIN_PI_EN, + .div_vga_shift = VGA_MAIN_DIV_SHIFT, + .div_pic_shift = PIC_MAIN_DIV_SHIFT, + .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT, + .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT, + .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT, + .div_inf_shift = INF_MAIN_DIV_SHIFT, + .div_layer_shift = LAYER_MAIN_DIV_SHIFT, }; static const struct zx_crtc_bits aux_crtc_bits = { @@ -81,7 +104,17 @@ static const struct zx_crtc_bits aux_crtc_bits = { .polarity_shift = AUX_POL_SHIFT, .int_frame_mask = TIMING_INT_AUX_FRAME, .tc_enable = AUX_TC_EN, - .gl_enable = OSD_CTRL0_GL1_EN, + .sec_vactive_shift = SEC_VACT_AUX_SHIFT, + .sec_vactive_mask = SEC_VACT_AUX_MASK, + .interlace_select = AUX_INTERLACE_SEL, + .pi_enable = AUX_PI_EN, + .div_vga_shift = VGA_AUX_DIV_SHIFT, + .div_pic_shift = PIC_AUX_DIV_SHIFT, + .div_tvenc_shift = TVENC_AUX_DIV_SHIFT, + .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT, + .div_hdmi_shift = HDMI_AUX_DIV_SHIFT, + .div_inf_shift = INF_AUX_DIV_SHIFT, + .div_layer_shift = LAYER_AUX_DIV_SHIFT, }; struct zx_crtc { @@ -97,6 +130,40 @@ struct zx_crtc { #define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc) +struct vou_layer_bits { + u32 enable; + u32 chnsel; + u32 clksel; +}; + +static const struct vou_layer_bits zx_gl_bits[GL_NUM] = { + { + .enable = OSD_CTRL0_GL0_EN, + 
.chnsel = OSD_CTRL0_GL0_SEL, + .clksel = VOU_CLK_GL0_SEL, + }, { + .enable = OSD_CTRL0_GL1_EN, + .chnsel = OSD_CTRL0_GL1_SEL, + .clksel = VOU_CLK_GL1_SEL, + }, +}; + +static const struct vou_layer_bits zx_vl_bits[VL_NUM] = { + { + .enable = OSD_CTRL0_VL0_EN, + .chnsel = OSD_CTRL0_VL0_SEL, + .clksel = VOU_CLK_VL0_SEL, + }, { + .enable = OSD_CTRL0_VL1_EN, + .chnsel = OSD_CTRL0_VL1_SEL, + .clksel = VOU_CLK_VL1_SEL, + }, { + .enable = OSD_CTRL0_VL2_EN, + .chnsel = OSD_CTRL0_VL2_SEL, + .clksel = VOU_CLK_VL2_SEL, + }, +}; + struct zx_vou_hw { struct device *dev; void __iomem *osd; @@ -112,6 +179,33 @@ struct zx_vou_hw { struct zx_crtc *aux_crtc; }; +enum vou_inf_data_sel { + VOU_YUV444 = 0, + VOU_RGB_101010 = 1, + VOU_RGB_888 = 2, + VOU_RGB_666 = 3, +}; + +struct vou_inf { + enum vou_inf_id id; + enum vou_inf_data_sel data_sel; + u32 clocks_en_bits; + u32 clocks_sel_bits; +}; + +static struct vou_inf vou_infs[] = { + [VOU_HDMI] = { + .data_sel = VOU_YUV444, + .clocks_en_bits = BIT(24) | BIT(18) | BIT(6), + .clocks_sel_bits = BIT(13) | BIT(2), + }, + [VOU_TV_ENC] = { + .data_sel = VOU_YUV444, + .clocks_en_bits = BIT(15), + .clocks_sel_bits = BIT(11) | BIT(0), + }, +}; + static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc) { struct zx_crtc *zcrtc = to_zx_crtc(crtc); @@ -119,20 +213,30 @@ static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc) return zcrtc->vou; } -void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc) +void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc, + enum vou_inf_hdmi_audio aud) { struct zx_crtc *zcrtc = to_zx_crtc(crtc); struct zx_vou_hw *vou = zcrtc->vou; + + zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud); +} + +void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc) +{ + struct zx_crtc *zcrtc = to_zx_crtc(crtc); + struct zx_vou_hw *vou = zcrtc->vou; + struct vou_inf *inf = &vou_infs[id]; bool is_main = zcrtc->chn_type == VOU_CHN_MAIN; - u32 data_sel_shift = inf->id << 1; + u32 data_sel_shift = id << 1; /* Select data format */ zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift, inf->data_sel << data_sel_shift); /* Select channel */ - zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id, - zcrtc->chn_type << inf->id); + zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id, + zcrtc->chn_type << id); /* Select interface clocks */ zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits, @@ -143,20 +247,79 @@ void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc) inf->clocks_en_bits); /* Enable the device */ - zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id); + zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id); } -void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc) +void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc) { struct zx_vou_hw *vou = crtc_to_vou(crtc); + struct vou_inf *inf = &vou_infs[id]; /* Disable the device */ - zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0); + zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0); /* Disable interface clocks */ zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0); } +void zx_vou_config_dividers(struct drm_crtc *crtc, + struct vou_div_config *configs, int num) +{ + struct zx_crtc *zcrtc = to_zx_crtc(crtc); + struct zx_vou_hw *vou = zcrtc->vou; + const struct zx_crtc_bits *bits = zcrtc->bits; + int i; + + /* Clear update flag bit */ + zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0); + + for (i = 0; i < num; i++) { + 
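		/*
		 * Each divider setting is a 3-bit field (masked with 0x7
		 * below); the enum in zx_vou.h encodes value = divisor - 1
		 * for the power-of-two divisors it defines (VOU_DIV_1 = 0,
		 * VOU_DIV_2 = 1, VOU_DIV_4 = 3, VOU_DIV_8 = 7).
		 */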
struct vou_div_config *cfg = configs + i; + u32 reg, shift; + + switch (cfg->id) { + case VOU_DIV_VGA: + reg = VOU_CLK_SEL; + shift = bits->div_vga_shift; + break; + case VOU_DIV_PIC: + reg = VOU_CLK_SEL; + shift = bits->div_pic_shift; + break; + case VOU_DIV_TVENC: + reg = VOU_DIV_PARA; + shift = bits->div_tvenc_shift; + break; + case VOU_DIV_HDMI_PNX: + reg = VOU_DIV_PARA; + shift = bits->div_hdmi_pnx_shift; + break; + case VOU_DIV_HDMI: + reg = VOU_DIV_PARA; + shift = bits->div_hdmi_shift; + break; + case VOU_DIV_INF: + reg = VOU_DIV_PARA; + shift = bits->div_inf_shift; + break; + case VOU_DIV_LAYER: + reg = VOU_DIV_PARA; + shift = bits->div_layer_shift; + break; + default: + continue; + } + + /* Each divider occupies 3 bits */ + zx_writel_mask(vou->vouctl + reg, 0x7 << shift, + cfg->val << shift); + } + + /* Set update flag bit to get dividers effected */ + zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, + DIV_PARA_UPDATE); +} + static inline void vou_chn_set_update(struct zx_crtc *zcrtc) { zx_writel(zcrtc->chnreg + CHN_UPDATE, 1); @@ -165,11 +328,13 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc) static void zx_crtc_enable(struct drm_crtc *crtc) { struct drm_display_mode *mode = &crtc->state->adjusted_mode; + bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; struct zx_crtc *zcrtc = to_zx_crtc(crtc); struct zx_vou_hw *vou = zcrtc->vou; const struct zx_crtc_regs *regs = zcrtc->regs; const struct zx_crtc_bits *bits = zcrtc->bits; struct videomode vm; + u32 scan_mask; u32 pol = 0; u32 val; int ret; @@ -177,7 +342,7 @@ static void zx_crtc_enable(struct drm_crtc *crtc) drm_display_mode_to_videomode(mode, &vm); /* Set up timing parameters */ - val = V_ACTIVE(vm.vactive - 1); + val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1); val |= H_ACTIVE(vm.hactive - 1); zx_writel(vou->timing + regs->fir_active, val); @@ -191,6 +356,25 @@ static void zx_crtc_enable(struct drm_crtc *crtc) val |= FRONT_PORCH(vm.vfront_porch - 1); zx_writel(vou->timing + regs->fir_vtiming, val); + if (interlaced) { + u32 shift = bits->sec_vactive_shift; + u32 mask = bits->sec_vactive_mask; + + val = zx_readl(vou->timing + SEC_V_ACTIVE); + val &= ~mask; + val |= ((vm.vactive / 2 - 1) << shift) & mask; + zx_writel(vou->timing + SEC_V_ACTIVE, val); + + val = SYNC_WIDE(vm.vsync_len - 1); + /* + * The vback_porch for the second field needs to shift one on + * the value for the first field. + */ + val |= BACK_PORCH(vm.vback_porch); + val |= FRONT_PORCH(vm.vfront_porch - 1); + zx_writel(vou->timing + regs->sec_vtiming, val); + } + /* Set up polarities */ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW) pol |= 1 << POL_VSYNC_SHIFT; @@ -201,9 +385,17 @@ static void zx_crtc_enable(struct drm_crtc *crtc) pol << bits->polarity_shift); /* Setup SHIFT register by following what ZTE BSP does */ - zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL); + val = H_SHIFT_VAL; + if (interlaced) + val |= V_SHIFT_VAL << 16; + zx_writel(vou->timing + regs->timing_shift, val); zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL); + /* Progressive or interlace scan select */ + scan_mask = bits->interlace_select | bits->pi_enable; + zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask, + interlaced ? 
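	/*
	 * (Interlace handling in this function: with DRM_MODE_FLAG_INTERLACE
	 * set, each field scans out half the lines, so FIR_*_ACTIVE and
	 * SEC_V_ACTIVE above are programmed with vm.vactive / 2 - 1; for the
	 * PAL mode in zx_tvenc.c that is 576 / 2 = 288 lines per field. The
	 * second field also gets its own vertical timing register, with the
	 * back porch value one greater than the first field's.)
	 */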
scan_mask : 0); + /* Enable TIMING_CTRL */ zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, bits->tc_enable); @@ -214,16 +406,16 @@ static void zx_crtc_enable(struct drm_crtc *crtc) zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK, vm.vactive << CHN_SCREEN_H_SHIFT); + /* Configure channel interlace buffer control */ + zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN, + interlaced ? CHN_INTERLACE_EN : 0); + /* Update channel */ vou_chn_set_update(zcrtc); /* Enable channel */ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE); - /* Enable Graphic Layer */ - zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, - bits->gl_enable); - drm_crtc_vblank_on(crtc); ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000); @@ -247,9 +439,6 @@ static void zx_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); - /* Disable Graphic Layer */ - zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0); - /* Disable channel */ zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0); @@ -294,7 +483,7 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou, enum vou_chn_type chn_type) { struct device *dev = vou->dev; - struct zx_layer_data data; + struct zx_plane *zplane; struct zx_crtc *zcrtc; int ret; @@ -305,19 +494,27 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou, zcrtc->vou = vou; zcrtc->chn_type = chn_type; + zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL); + if (!zplane) + return -ENOMEM; + + zplane->dev = dev; + if (chn_type == VOU_CHN_MAIN) { - data.layer = vou->osd + MAIN_GL_OFFSET; - data.csc = vou->osd + MAIN_CSC_OFFSET; - data.hbsc = vou->osd + MAIN_HBSC_OFFSET; - data.rsz = vou->otfppu + MAIN_RSZ_OFFSET; + zplane->layer = vou->osd + MAIN_GL_OFFSET; + zplane->csc = vou->osd + MAIN_CSC_OFFSET; + zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET; + zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET; + zplane->bits = &zx_gl_bits[0]; zcrtc->chnreg = vou->osd + OSD_MAIN_CHN; zcrtc->regs = &main_crtc_regs; zcrtc->bits = &main_crtc_bits; } else { - data.layer = vou->osd + AUX_GL_OFFSET; - data.csc = vou->osd + AUX_CSC_OFFSET; - data.hbsc = vou->osd + AUX_HBSC_OFFSET; - data.rsz = vou->otfppu + AUX_RSZ_OFFSET; + zplane->layer = vou->osd + AUX_GL_OFFSET; + zplane->csc = vou->osd + AUX_CSC_OFFSET; + zplane->hbsc = vou->osd + AUX_HBSC_OFFSET; + zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET; + zplane->bits = &zx_gl_bits[1]; zcrtc->chnreg = vou->osd + OSD_AUX_CHN; zcrtc->regs = &aux_crtc_regs; zcrtc->bits = &aux_crtc_bits; @@ -331,13 +528,14 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou, return ret; } - zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY); - if (IS_ERR(zcrtc->primary)) { - ret = PTR_ERR(zcrtc->primary); + ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY); + if (ret) { DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret); return ret; } + zcrtc->primary = &zplane->plane; + ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL, &zx_crtc_funcs, NULL); if (ret) { @@ -393,6 +591,78 @@ void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe) zcrtc->bits->int_frame_mask, 0); } +void zx_vou_layer_enable(struct drm_plane *plane) +{ + struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc); + struct zx_vou_hw *vou = zcrtc->vou; + struct zx_plane *zplane = to_zx_plane(plane); + const struct vou_layer_bits *bits = zplane->bits; + + if (zcrtc->chn_type == VOU_CHN_MAIN) { + zx_writel_mask(vou->osd + OSD_CTRL0, 
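	/*
	 * (Channel routing below: clearing chnsel/clksel routes a layer to
	 * the main channel, setting them routes it to aux; the per-layer
	 * enable bit replaces the fixed GL0/GL1 setup that zx_crtc_enable()
	 * and vou_hw_init() used to do, as the removed hunks show.)
	 */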
bits->chnsel, 0); + zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0); + } else { + zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, + bits->chnsel); + zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, + bits->clksel); + } + + zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable); +} + +void zx_vou_layer_disable(struct drm_plane *plane) +{ + struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc); + struct zx_vou_hw *vou = zcrtc->vou; + struct zx_plane *zplane = to_zx_plane(plane); + const struct vou_layer_bits *bits = zplane->bits; + + zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0); +} + +static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou) +{ + struct device *dev = vou->dev; + struct zx_plane *zplane; + int i; + int ret; + + /* + * VL0 has some quirks on scaling support which need special handling. + * Let's leave it out for now. + */ + for (i = 1; i < VL_NUM; i++) { + zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL); + if (!zplane) { + DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i); + return; + } + + zplane->layer = vou->osd + OSD_VL_OFFSET(i); + zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i); + zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i); + zplane->bits = &zx_vl_bits[i]; + + ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY); + if (ret) { + DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i); + continue; + } + } +} + +static inline void zx_osd_int_update(struct zx_crtc *zcrtc) +{ + struct drm_crtc *crtc = &zcrtc->crtc; + struct drm_plane *plane; + + vou_chn_set_update(zcrtc); + + drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask) + zx_plane_set_update(plane); +} + static irqreturn_t vou_irq_handler(int irq, void *dev_id) { struct zx_vou_hw *vou = dev_id; @@ -412,15 +682,11 @@ static irqreturn_t vou_irq_handler(int irq, void *dev_id) state = zx_readl(vou->osd + OSD_INT_STA); zx_writel(vou->osd + OSD_INT_CLRSTA, state); - if (state & OSD_INT_MAIN_UPT) { - vou_chn_set_update(vou->main_crtc); - zx_plane_set_update(vou->main_crtc->primary); - } + if (state & OSD_INT_MAIN_UPT) + zx_osd_int_update(vou->main_crtc); - if (state & OSD_INT_AUX_UPT) { - vou_chn_set_update(vou->aux_crtc); - zx_plane_set_update(vou->aux_crtc->primary); - } + if (state & OSD_INT_AUX_UPT) + zx_osd_int_update(vou->aux_crtc); if (state & OSD_INT_ERROR) DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state); @@ -451,19 +717,9 @@ static void vou_dtrc_init(struct zx_vou_hw *vou) static void vou_hw_init(struct zx_vou_hw *vou) { - /* Set GL0 to main channel and GL1 to aux channel */ - zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0); - zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL, - OSD_CTRL0_GL1_SEL); - /* Release reset for all VOU modules */ zx_writel(vou->vouctl + VOU_SOFT_RST, ~0); - /* Select main clock for GL0 and aux clock for GL1 module */ - zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0); - zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL, - VOU_CLK_GL1_SEL); - /* Enable clock auto-gating for all VOU modules */ zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0); @@ -600,6 +856,8 @@ static int zx_crtc_bind(struct device *dev, struct device *master, void *data) goto disable_ppu_clk; } + zx_overlay_init(drm, vou); + return 0; disable_ppu_clk: diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h index 349e06cd86f4..57e3c31ee6a5 100644 --- a/drivers/gpu/drm/zte/zx_vou.h +++ b/drivers/gpu/drm/zte/zx_vou.h @@ -23,24 +23,48 @@ enum vou_inf_id { VOU_VGA = 5, }; -enum vou_inf_data_sel { 
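/*
 * (This zx_vou.h hunk is the header side of the refactor above: struct
 * vou_inf and its data-select enum become private to zx_vou.c, encoders
 * now pass a bare enum vou_inf_id to vou_inf_enable()/vou_inf_disable(),
 * and the per-interface clock and data-select bits live in the private
 * vou_infs[] table instead.)
 */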
- VOU_YUV444 = 0, - VOU_RGB_101010 = 1, - VOU_RGB_888 = 2, - VOU_RGB_666 = 3, +enum vou_inf_hdmi_audio { + VOU_HDMI_AUD_SPDIF = BIT(0), + VOU_HDMI_AUD_I2S = BIT(1), + VOU_HDMI_AUD_DSD = BIT(2), + VOU_HDMI_AUD_HBR = BIT(3), + VOU_HDMI_AUD_PARALLEL = BIT(4), }; -struct vou_inf { - enum vou_inf_id id; - enum vou_inf_data_sel data_sel; - u32 clocks_en_bits; - u32 clocks_sel_bits; +void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc, + enum vou_inf_hdmi_audio aud); +void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc); +void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc); + +enum vou_div_id { + VOU_DIV_VGA, + VOU_DIV_PIC, + VOU_DIV_TVENC, + VOU_DIV_HDMI_PNX, + VOU_DIV_HDMI, + VOU_DIV_INF, + VOU_DIV_LAYER, +}; + +enum vou_div_val { + VOU_DIV_1 = 0, + VOU_DIV_2 = 1, + VOU_DIV_4 = 3, + VOU_DIV_8 = 7, }; -void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc); -void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc); +struct vou_div_config { + enum vou_div_id id; + enum vou_div_val val; +}; + +void zx_vou_config_dividers(struct drm_crtc *crtc, + struct vou_div_config *configs, int num); int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe); void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe); +void zx_vou_layer_enable(struct drm_plane *plane); +void zx_vou_layer_disable(struct drm_plane *plane); + #endif /* __ZX_VOU_H__ */ diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h index f44e7a4ae441..c066ef123434 100644 --- a/drivers/gpu/drm/zte/zx_vou_regs.h +++ b/drivers/gpu/drm/zte/zx_vou_regs.h @@ -22,6 +22,15 @@ #define AUX_HBSC_OFFSET 0x860 #define AUX_RSZ_OFFSET 0x800 +#define OSD_VL0_OFFSET 0x040 +#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i)) + +#define HBSC_VL0_OFFSET 0x760 +#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i)) + +#define RSZ_VL1_U0 0xa00 +#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i)) + /* OSD (GPC_GLOBAL) registers */ #define OSD_INT_STA 0x04 #define OSD_INT_CLRSTA 0x08 @@ -42,6 +51,12 @@ ) #define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT) #define OSD_CTRL0 0x10 +#define OSD_CTRL0_VL0_EN BIT(13) +#define OSD_CTRL0_VL0_SEL BIT(12) +#define OSD_CTRL0_VL1_EN BIT(11) +#define OSD_CTRL0_VL1_SEL BIT(10) +#define OSD_CTRL0_VL2_EN BIT(9) +#define OSD_CTRL0_VL2_SEL BIT(8) #define OSD_CTRL0_GL0_EN BIT(7) #define OSD_CTRL0_GL0_SEL BIT(6) #define OSD_CTRL0_GL1_EN BIT(5) @@ -60,6 +75,8 @@ #define CHN_SCREEN_H_SHIFT 5 #define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT) #define CHN_UPDATE 0x08 +#define CHN_INTERLACE_BUF_CTRL 0x24 +#define CHN_INTERLACE_EN BIT(2) /* TIMING_CTRL registers */ #define TIMING_TC_ENABLE 0x04 @@ -102,6 +119,19 @@ #define TIMING_MAIN_SHIFT 0x2c #define TIMING_AUX_SHIFT 0x30 #define H_SHIFT_VAL 0x0048 +#define V_SHIFT_VAL 0x0001 +#define SCAN_CTRL 0x34 +#define AUX_PI_EN BIT(19) +#define MAIN_PI_EN BIT(18) +#define AUX_INTERLACE_SEL BIT(1) +#define MAIN_INTERLACE_SEL BIT(0) +#define SEC_V_ACTIVE 0x38 +#define SEC_VACT_MAIN_SHIFT 0 +#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT) +#define SEC_VACT_AUX_SHIFT 16 +#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT) +#define SEC_MAIN_V_TIMING 0x3c +#define SEC_AUX_V_TIMING 0x40 #define TIMING_MAIN_PI_SHIFT 0x68 #define TIMING_AUX_PI_SHIFT 0x6c #define H_PI_SHIFT_VAL 0x000f @@ -146,10 +176,31 @@ #define VOU_INF_DATA_SEL 0x08 #define VOU_SOFT_RST 0x14 #define VOU_CLK_SEL 0x18 +#define VGA_AUX_DIV_SHIFT 29 +#define VGA_MAIN_DIV_SHIFT 26 +#define 
PIC_MAIN_DIV_SHIFT 23 +#define PIC_AUX_DIV_SHIFT 20 +#define VOU_CLK_VL2_SEL BIT(8) +#define VOU_CLK_VL1_SEL BIT(7) +#define VOU_CLK_VL0_SEL BIT(6) #define VOU_CLK_GL1_SEL BIT(5) #define VOU_CLK_GL0_SEL BIT(4) +#define VOU_DIV_PARA 0x1c +#define DIV_PARA_UPDATE BIT(31) +#define TVENC_AUX_DIV_SHIFT 28 +#define HDMI_AUX_PNX_DIV_SHIFT 25 +#define HDMI_MAIN_PNX_DIV_SHIFT 22 +#define HDMI_AUX_DIV_SHIFT 19 +#define HDMI_MAIN_DIV_SHIFT 16 +#define TVENC_MAIN_DIV_SHIFT 13 +#define INF_AUX_DIV_SHIFT 9 +#define INF_MAIN_DIV_SHIFT 6 +#define LAYER_AUX_DIV_SHIFT 3 +#define LAYER_MAIN_DIV_SHIFT 0 #define VOU_CLK_REQEN 0x20 #define VOU_CLK_EN 0x24 +#define VOU_INF_HDMI_CTRL 0x30 +#define VOU_HDMI_AUD_MASK 0x1f /* OTFPPU_CTRL registers */ #define OTFPPU_RSZ_DATA_SOURCE 0x04 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index cff060b56da9..ea36b557d5ee 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 717704e9ae07..c0303f61c26a 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev) struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int brightness; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 5) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } brightness = data[4]; if (brightness < 0 || brightness > 3) { dev_warn(dev, "Read invalid backlight brightness: %02hhx.\n", data[4]); - return -EIO; + ret = -EIO; + goto out; } - return brightness; + ret = brightness; +out: + kfree(data); + + return ret; } static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) @@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev, struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); const char *macro_mode; - char data[8]; + char *data; + + data = kmalloc(2, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_GET_MODE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 2, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 1) { dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } switch (data[0]) { @@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev, default: dev_warn(dev, "K90 in unknown mode: %02hhx.\n", data[0]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); + ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); +out: + kfree(data); + + return ret; } 
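The hid-corsair changes here all follow one pattern: the buffer handed to usb_control_msg() moves from the stack to kmalloc(), since USB transfer buffers must be DMA-able and on-stack buffers are not (with CONFIG_VMAP_STACK the stack may not even be physically contiguous), and the success checks tighten from ret < 0 to ret < N, where N covers the bytes each caller actually consumes (data[4] needs 5 bytes, data[0] needs 1, data[7] needs 8). A minimal sketch of the pattern, with read_k90_status() as a hypothetical stand-in for the three converted functions:

static int read_k90_status(struct usb_device *usbdev, u8 *out, int len)
{
	char *data;
	int ret;

	data = kmalloc(len, GFP_KERNEL);	/* heap buffer: DMA-able */
	if (!data)
		return -ENOMEM;

	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
			      K90_REQUEST_STATUS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, data, len, USB_CTRL_SET_TIMEOUT);
	if (ret < len) {	/* failed or short transfer */
		ret = -EIO;
		goto out;
	}

	memcpy(out, data, len);
	ret = 0;
out:
	kfree(data);
	return ret;
}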
static ssize_t k90_store_macro_mode(struct device *dev, @@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev, struct usb_interface *usbif = to_usb_interface(dev->parent); struct usb_device *usbdev = interface_to_usbdev(usbif); int current_profile; - char data[8]; + char *data; + + data = kmalloc(8, GFP_KERNEL); + if (!data) + return -ENOMEM; ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), K90_REQUEST_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, 8, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + if (ret < 8) { dev_warn(dev, "Failed to get K90 initial state (error %d).\n", ret); - return -EIO; + ret = -EIO; + goto out; } current_profile = data[7]; if (current_profile < 1 || current_profile > 3) { dev_warn(dev, "Read invalid current profile: %02hhx.\n", data[7]); - return -EIO; + ret = -EIO; + goto out; } - return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); + ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile); +out: + kfree(data); + + return ret; } static ssize_t k90_store_current_profile(struct device *dev, diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index 1b764d1745f3..1689568b597d 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) return rdesc; + if (*rsize < 4) + return rdesc; + for (i = 0; i < *rsize - 4; i++) if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { rdesc[i] = 0x19; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 54bd22dc1411..f46f2c5117fa 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -816,6 +816,9 @@ #define USB_VENDOR_ID_PETALYNX 0x18b1 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 +#define USB_VENDOR_ID_PETZL 0x2122 +#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234 + #define USB_VENDOR_ID_PHILIPS 0x0471 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 78fb32a7b103..ea3c3546cef7 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. + */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index b9779bcbd140..8aeca038cc73 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev) return retval; } + if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) + wacom_wac->shared->touch = hdev; + else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) + wacom_wac->shared->pen = hdev; + out: mutex_unlock(&wacom_udev_list_lock); return retval; @@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) if (error) goto fail; - error = wacom_add_shared_data(hdev); - if (error) - goto fail; - /* * Bamboo Pad has a generic hid handling for the Pen, and we switch it * into debug mode for the touch part. 
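The hid-cypress hunk above is an unsigned-underflow fix: *rsize is unsigned, so for a report descriptor shorter than 4 bytes the loop bound *rsize - 4 wraps around to a huge value and rdesc[i] is read far out of bounds; returning early for *rsize < 4 closes that hole. The wraparound itself, demonstrated in plain userspace C:

#include <stdio.h>

int main(void)
{
	unsigned int rsize = 2;	/* descriptor shorter than 4 bytes */

	/* 2 - 4 wraps to UINT_MAX - 1, so "i < rsize - 4" never stops */
	printf("%u\n", rsize - 4);	/* prints 4294967294 */
	return 0;
}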
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) wacom_update_name(wacom, wireless ? " (WL)" : ""); - if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) - wacom_wac->shared->touch = hdev; - else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) - wacom_wac->shared->pen = hdev; + error = wacom_add_shared_data(hdev); + if (error) + goto fail; if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) && (features->quirks & WACOM_QUIRK_BATTERY)) { diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index b1a9a3ca6d56..0884dc9554fd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) wacom_report_events(hdev, report); + /* + * Non-input reports may be sent prior to the device being + * completely initialized. Since only their events need + * to be processed, exit after 'wacom_report_events' has + * been called to prevent potential crashes in the report- + * processing functions. + */ + if (report->type != HID_INPUT_REPORT) + return; + if (WACOM_PAD_FIELD(field)) { wacom_wac_pad_battery_report(hdev, report); if (wacom->wacom_wac.pad_input) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index c2268cdf38e8..e34d82e79b98 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, u8 command, int size, union i2c_smbus_data *data) { struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); + unsigned short piix4_smba = adapdata->smba; + int retries = MAX_TIMEOUT; + int smbslvcnt; u8 smba_en_lo; u8 port; int retval; + /* Request the SMBUS semaphore, avoid conflicts with the IMC */ + smbslvcnt = inb_p(SMBSLVCNT); + do { + outb_p(smbslvcnt | 0x10, SMBSLVCNT); + + /* Check the semaphore status */ + smbslvcnt = inb_p(SMBSLVCNT); + if (smbslvcnt & 0x10) + break; + + usleep_range(1000, 2000); + } while (--retries); + /* SMBus is still owned by the IMC, we give up */ + if (!retries) + return -EBUSY; + mutex_lock(&piix4_mutex_sb800); outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); @@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, mutex_unlock(&piix4_mutex_sb800); + /* Release the semaphore */ + outb_p(smbslvcnt | 0x20, SMBSLVCNT); + return retval; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index cf9e396d7702..583e95042a21 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev) if (!client->irq) { int irq = -ENOENT; - if (dev->of_node) { + if (client->flags & I2C_CLIENT_HOST_NOTIFY) { + dev_dbg(dev, "Using Host Notify IRQ\n"); + irq = i2c_smbus_host_notify_to_irq(client); + } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); if (irq == -EINVAL || irq == -ENODATA) irq = of_irq_get(dev->of_node, 0); @@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev) } if (irq == -EPROBE_DEFER) return irq; - /* - * ACPI and OF did not find any useful IRQ, try to see - * if Host Notify can be used. 
- */ - if (irq < 0) { - dev_dbg(dev, "Using Host Notify IRQ\n"); - irq = i2c_smbus_host_notify_to_irq(client); - } + if (irq < 0) irq = 0; @@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, if (i2c_check_addr_validity(addr, info.flags)) { dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", - info.addr, node->full_name); + addr, node->full_name); return ERR_PTR(-EINVAL); } @@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, info.of_node = of_node_get(node); info.archdata = &dev_ad; + if (of_property_read_bool(node, "host-notify")) + info.flags |= I2C_CLIENT_HOST_NOTIFY; + if (of_get_property(node, "wakeup-source", NULL)) info.flags |= I2C_CLIENT_WAKE; @@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) int ret; if (!client || !slave_cb) { - WARN(1, "insufficent data\n"); + WARN(1, "insufficient data\n"); return -EINVAL; } diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index 66f323fd3982..6f638bbc922d 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, unsigned long arg) { struct i2c_smbus_ioctl_data data_arg; - union i2c_smbus_data temp; + union i2c_smbus_data temp = {}; int datasize, res; if (copy_from_user(&data_arg, diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index f3135ae22df4..abd18f31b24f 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -22,7 +22,6 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> -#include <linux/miscdevice.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/init.h> diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6d9499658671..c7d5b2b643d1 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad) input_dev->name = xpad->name; input_dev->phys = xpad->phys; usb_to_input_id(xpad->udev, &input_dev->id); + + if (xpad->xtype == XTYPE_XBOX360W) { + /* x360w controllers and the receiver have different ids */ + input_dev->id.product = 0x02a1; + } + input_dev->dev.parent = &xpad->intf->dev; input_set_drvdata(input_dev, xpad); diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c index a8b0a2eec344..7fed92fb8cc1 100644 --- a/drivers/input/misc/adxl34x-i2c.c +++ b/drivers/input/misc/adxl34x-i2c.c @@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = { MODULE_DEVICE_TABLE(i2c, adxl34x_id); -#ifdef CONFIG_OF static const struct of_device_id adxl34x_of_id[] = { /* * The ADXL346 is backward-compatible with the ADXL345. 
Differences are @@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = { }; MODULE_DEVICE_TABLE(of, adxl34x_of_id); -#endif static struct i2c_driver adxl34x_driver = { .driver = { .name = "adxl34x", .pm = &adxl34x_i2c_pm, - .of_match_table = of_match_ptr(adxl34x_of_id), + .of_match_table = adxl34x_of_id, }, .probe = adxl34x_i2c_probe, .remove = adxl34x_i2c_remove, diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index cde6f4bd8ea2..6d279aa27cb9 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -114,7 +114,7 @@ enum SS4_PACKET_ID { (_b[1] & 0x7F) \ ) -#define SS4_TS_Y_V2(_b) (s8)( \ +#define SS4_TS_Y_V2(_b) -(s8)( \ ((_b[3] & 0x01) << 7) | \ (_b[2] & 0x7F) \ ) diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c index aa7c5da60800..cb2bf203f4ca 100644 --- a/drivers/input/mouse/synaptics_i2c.c +++ b/drivers/input/mouse/synaptics_i2c.c @@ -29,7 +29,7 @@ * after soft reset, we should wait for 1 ms * before the device becomes operational */ -#define SOFT_RESET_DELAY_MS 3 +#define SOFT_RESET_DELAY_US 3000 /* and after hard reset, we should wait for max 500ms */ #define HARD_RESET_DELAY_MS 500 @@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client) if (ret) { dev_err(&client->dev, "Unable to reset device\n"); } else { - msleep(SOFT_RESET_DELAY_MS); + usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100); ret = synaptics_i2c_config(client); if (ret) dev_err(&client->dev, "Unable to config device\n"); diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig index 30cc627a4f45..8993983e3fe4 100644 --- a/drivers/input/rmi4/Kconfig +++ b/drivers/input/rmi4/Kconfig @@ -41,7 +41,8 @@ config RMI4_SMB config RMI4_F03 bool "RMI4 Function 03 (PS2 Guest)" - depends on RMI4_CORE && SERIO + depends on RMI4_CORE + depends on SERIO=y || RMI4_CORE=SERIO help Say Y here if you want to add support for RMI4 function 03. 
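The synaptics_i2c change above swaps msleep(3) for usleep_range(): msleep() is jiffy-based and can oversleep a 3 ms request by an entire tick (commonly 10 ms or more), while usleep_range() is hrtimer-based and the upper bound lets the scheduler coalesce nearby wakeups. A minimal sketch of the idiom; the constant mirrors the patch, the helper name is made up:

#include <linux/delay.h>

#define SOFT_RESET_DELAY_US 3000

static void wait_after_soft_reset(void)
{
	/* sleep at least 3 ms but no more than ~3.1 ms */
	usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
}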
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 77551f522202..a7618776705a 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 02aec284deca..3e6003d32e56 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev) case QUEUE_HEADER_NORMAL: report_count = ts->buf[FW_HDR_COUNT]; - if (report_count > 3) { + if (report_count == 0 || report_count > 3) { dev_err(&client->dev, - "too large report count: %*ph\n", + "bad report count: %*ph\n", HEADER_SIZE, ts->buf); break; } diff --git a/drivers/md/md.h b/drivers/md/md.h index e38936d05df1..2a514036a83d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); struct md_cluster_info; +/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */ enum mddev_flags { MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ MD_CLOSING, /* If set, we are closing the array, do not open @@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev) { return mddev->cluster_info && mddev->bitmap_info.nodes > 1; } + +/* clear unsupported mddev_flags */ +static inline void mddev_clear_unsupported_flags(struct mddev *mddev, + unsigned long unsupported_flags) +{ + mddev->flags &= ~unsupported_flags; +} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a162fedeb51a..848365d474f3 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -26,6 +26,11 @@ #include "raid0.h" #include "raid5.h" +#define UNSUPPORTED_MDDEV_FLAGS \ + ((1L << MD_HAS_JOURNAL) | \ + (1L << MD_JOURNAL_CLEAN) | \ + (1L << MD_FAILFAST_SUPPORTED)) + static int raid0_congested(struct mddev *mddev, int bits) { struct r0conf *conf = mddev->private; @@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev) mddev->delta_disks = -1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_HAS_JOURNAL, &mddev->flags); - clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); + mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); @@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev) mddev->degraded = 0; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); return priv_conf; @@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev) mddev->raid_disks = 1; /* make sure it will be not marked as dirty */ mddev->recovery_cp = MaxSector; - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); create_strip_zones(mddev, &priv_conf); return priv_conf; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a1f3fbed9100..7b0f647bcccb 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -42,6 +42,10 
@@ #include "raid1.h" #include "bitmap.h" +#define UNSUPPORTED_MDDEV_FLAGS \ + ((1L << MD_HAS_JOURNAL) | \ + (1L << MD_JOURNAL_CLEAN)) + /* * Number of guaranteed r1bios in case of extreme VM load: */ @@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void raid1_make_request(struct mddev *mddev, struct bio * bio) +static void raid1_read_request(struct mddev *mddev, struct bio *bio, + struct r1bio *r1_bio) { struct r1conf *conf = mddev->private; struct raid1_info *mirror; - struct r1bio *r1_bio; struct bio *read_bio; + struct bitmap *bitmap = mddev->bitmap; + const int op = bio_op(bio); + const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + int sectors_handled; + int max_sectors; + int rdisk; + + wait_barrier(conf, bio); + +read_again: + rdisk = read_balance(conf, r1_bio, &max_sectors); + + if (rdisk < 0) { + /* couldn't find anywhere to read from */ + raid_end_bio_io(r1_bio); + return; + } + mirror = conf->mirrors + rdisk; + + if (test_bit(WriteMostly, &mirror->rdev->flags) && + bitmap) { + /* + * Reading from a write-mostly device must take care not to + * over-take any writes that are 'behind' + */ + raid1_log(mddev, "wait behind writes"); + wait_event(bitmap->behind_wait, + atomic_read(&bitmap->behind_writes) == 0); + } + r1_bio->read_disk = rdisk; + r1_bio->start_next_window = 0; + + read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); + + r1_bio->bios[rdisk] = read_bio; + + read_bio->bi_iter.bi_sector = r1_bio->sector + + mirror->rdev->data_offset; + read_bio->bi_bdev = mirror->rdev->bdev; + read_bio->bi_end_io = raid1_end_read_request; + bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &mirror->rdev->flags) && + test_bit(R1BIO_FailFast, &r1_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; + read_bio->bi_private = r1_bio; + + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + r1_bio->sector); + + if (max_sectors < r1_bio->sectors) { + /* + * could not read all from this device, so we will need another + * r1_bio. + */ + sectors_handled = (r1_bio->sector + max_sectors + - bio->bi_iter.bi_sector); + r1_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + + /* + * Cannot call generic_make_request directly as that will be + * queued in __make_request and subsequent mempool_alloc might + * block waiting for it. So hand bio over to raid1d. 
+ */ + reschedule_retry(r1_bio); + + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = bio_sectors(bio) - sectors_handled; + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); +} + +static void raid1_write_request(struct mddev *mddev, struct bio *bio, + struct r1bio *r1_bio) +{ + struct r1conf *conf = mddev->private; int i, disks; - struct bitmap *bitmap; + struct bitmap *bitmap = mddev->bitmap; unsigned long flags; const int op = bio_op(bio); - const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)); @@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) md_write_start(mddev, bio); /* wait on superblock update early */ - if (bio_data_dir(bio) == WRITE && - ((bio_end_sector(bio) > mddev->suspend_lo && + if ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || (mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, bio_end_sector(bio))))) { - /* As the suspend_* range is controlled by - * userspace, we want an interruptible - * wait. + bio->bi_iter.bi_sector, bio_end_sector(bio)))) { + + /* + * As the suspend_* range is controlled by userspace, we want + * an interruptible wait. */ DEFINE_WAIT(w); for (;;) { @@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio) bio->bi_iter.bi_sector >= mddev->suspend_hi || (mddev_is_clustered(mddev) && !md_cluster_ops->area_resyncing(mddev, WRITE, - bio->bi_iter.bi_sector, bio_end_sector(bio)))) + bio->bi_iter.bi_sector, + bio_end_sector(bio)))) break; schedule(); } finish_wait(&conf->wait_barrier, &w); } - start_next_window = wait_barrier(conf, bio); - bitmap = mddev->bitmap; - - /* - * make_request() can abort the operation when read-ahead is being - * used and no empty request is available. - * - */ - r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); - - r1_bio->master_bio = bio; - r1_bio->sectors = bio_sectors(bio); - r1_bio->state = 0; - r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_iter.bi_sector; - - /* We might need to issue multiple reads to different - * devices if there are bad blocks around, so we keep - * track of the number of reads in bio->bi_phys_segments. - * If this is 0, there is only one r1_bio and no locking - * will be needed when requests complete. If it is - * non-zero, then it is the number of not-completed requests. 
- */ - bio->bi_phys_segments = 0; - bio_clear_flag(bio, BIO_SEG_VALID); - - if (rw == READ) { - /* - * read balancing logic: - */ - int rdisk; - -read_again: - rdisk = read_balance(conf, r1_bio, &max_sectors); - - if (rdisk < 0) { - /* couldn't find anywhere to read from */ - raid_end_bio_io(r1_bio); - return; - } - mirror = conf->mirrors + rdisk; - - if (test_bit(WriteMostly, &mirror->rdev->flags) && - bitmap) { - /* Reading from a write-mostly device must - * take care not to over-take any writes - * that are 'behind' - */ - raid1_log(mddev, "wait behind writes"); - wait_event(bitmap->behind_wait, - atomic_read(&bitmap->behind_writes) == 0); - } - r1_bio->read_disk = rdisk; - r1_bio->start_next_window = 0; - - read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, - max_sectors); - - r1_bio->bios[rdisk] = read_bio; - - read_bio->bi_iter.bi_sector = r1_bio->sector + - mirror->rdev->data_offset; - read_bio->bi_bdev = mirror->rdev->bdev; - read_bio->bi_end_io = raid1_end_read_request; - bio_set_op_attrs(read_bio, op, do_sync); - if (test_bit(FailFast, &mirror->rdev->flags) && - test_bit(R1BIO_FailFast, &r1_bio->state)) - read_bio->bi_opf |= MD_FAILFAST; - read_bio->bi_private = r1_bio; - - if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), - read_bio, disk_devt(mddev->gendisk), - r1_bio->sector); - - if (max_sectors < r1_bio->sectors) { - /* could not read all from this device, so we will - * need another r1_bio. - */ - - sectors_handled = (r1_bio->sector + max_sectors - - bio->bi_iter.bi_sector); - r1_bio->sectors = max_sectors; - spin_lock_irq(&conf->device_lock); - if (bio->bi_phys_segments == 0) - bio->bi_phys_segments = 2; - else - bio->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - /* Cannot call generic_make_request directly - * as that will be queued in __make_request - * and subsequent mempool_alloc might block waiting - * for it. So hand bio over to raid1d. - */ - reschedule_retry(r1_bio); - - r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); - - r1_bio->master_bio = bio; - r1_bio->sectors = bio_sectors(bio) - sectors_handled; - r1_bio->state = 0; - r1_bio->mddev = mddev; - r1_bio->sector = bio->bi_iter.bi_sector + - sectors_handled; - goto read_again; - } else - generic_make_request(read_bio); - return; - } - - /* - * WRITE: - */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); raid1_log(mddev, "wait queued"); @@ -1280,8 +1261,7 @@ read_again: int bad_sectors; int is_bad; - is_bad = is_badblock(rdev, r1_bio->sector, - max_sectors, + is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, &first_bad, &bad_sectors); if (is_bad < 0) { /* mustn't write here until the bad block is @@ -1370,7 +1350,8 @@ read_again: continue; mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); + bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, + max_sectors); if (first_clone) { /* do behind I/O ? @@ -1464,6 +1445,40 @@ read_again: wake_up(&conf->wait_barrier); } +static void raid1_make_request(struct mddev *mddev, struct bio *bio) +{ + struct r1conf *conf = mddev->private; + struct r1bio *r1_bio; + + /* + * make_request() can abort the operation when read-ahead is being + * used and no empty request is available. 
+ * + */ + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); + + r1_bio->master_bio = bio; + r1_bio->sectors = bio_sectors(bio); + r1_bio->state = 0; + r1_bio->mddev = mddev; + r1_bio->sector = bio->bi_iter.bi_sector; + + /* + * We might need to issue multiple reads to different devices if there + * are bad blocks around, so we keep track of the number of reads in + * bio->bi_phys_segments. If this is 0, there is only one r1_bio and + * no locking will be needed when requests complete. If it is + * non-zero, then it is the number of not-completed requests. + */ + bio->bi_phys_segments = 0; + bio_clear_flag(bio, BIO_SEG_VALID); + + if (bio_data_dir(bio) == READ) + raid1_read_request(mddev, bio, r1_bio); + else + raid1_write_request(mddev, bio, r1_bio); +} + static void raid1_status(struct seq_file *seq, struct mddev *mddev) { struct r1conf *conf = mddev->private; @@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev) if (!IS_ERR(conf)) { /* Array must appear to be quiesced */ conf->array_frozen = 1; - clear_bit(MD_HAS_JOURNAL, &mddev->flags); - clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); + mddev_clear_unsupported_flags(mddev, + UNSUPPORTED_MDDEV_FLAGS); } return conf; } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ab5e86209322..1920756828df 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void __make_request(struct mddev *mddev, struct bio *bio) +static void raid10_read_request(struct mddev *mddev, struct bio *bio, + struct r10bio *r10_bio) { struct r10conf *conf = mddev->private; - struct r10bio *r10_bio; struct bio *read_bio; + const int op = bio_op(bio); + const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); + int sectors_handled; + int max_sectors; + sector_t sectors; + struct md_rdev *rdev; + int slot; + + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + + sectors = bio_sectors(bio); + while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && + bio->bi_iter.bi_sector < conf->reshape_progress && + bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { + /* + * IO spans the reshape position. 
Need to wait for reshape to + * pass + */ + raid10_log(conf->mddev, "wait reshape"); + allow_barrier(conf); + wait_event(conf->wait_barrier, + conf->reshape_progress <= bio->bi_iter.bi_sector || + conf->reshape_progress >= bio->bi_iter.bi_sector + + sectors); + wait_barrier(conf); + } + +read_again: + rdev = read_balance(conf, r10_bio, &max_sectors); + if (!rdev) { + raid_end_bio_io(r10_bio); + return; + } + slot = r10_bio->read_slot; + + read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); + bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, + max_sectors); + + r10_bio->devs[slot].bio = read_bio; + r10_bio->devs[slot].rdev = rdev; + + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + + choose_data_offset(r10_bio, rdev); + read_bio->bi_bdev = rdev->bdev; + read_bio->bi_end_io = raid10_end_read_request; + bio_set_op_attrs(read_bio, op, do_sync); + if (test_bit(FailFast, &rdev->flags) && + test_bit(R10BIO_FailFast, &r10_bio->state)) + read_bio->bi_opf |= MD_FAILFAST; + read_bio->bi_private = r10_bio; + + if (mddev->gendisk) + trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), + read_bio, disk_devt(mddev->gendisk), + r10_bio->sector); + if (max_sectors < r10_bio->sectors) { + /* + * Could not read all from this device, so we will need another + * r10_bio. + */ + sectors_handled = (r10_bio->sector + max_sectors + - bio->bi_iter.bi_sector); + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); + if (bio->bi_phys_segments == 0) + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; + spin_unlock_irq(&conf->device_lock); + /* + * Cannot call generic_make_request directly as that will be + * queued in __generic_make_request and subsequent + * mempool_alloc might block waiting for it. so hand bio over + * to raid10d. + */ + reschedule_retry(r10_bio); + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = bio_sectors(bio) - sectors_handled; + r10_bio->state = 0; + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; + goto read_again; + } else + generic_make_request(read_bio); + return; +} + +static void raid10_write_request(struct mddev *mddev, struct bio *bio, + struct r10bio *r10_bio) +{ + struct r10conf *conf = mddev->private; int i; const int op = bio_op(bio); - const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); const unsigned long do_fua = (bio->bi_opf & REQ_FUA); unsigned long flags; struct md_rdev *blocked_rdev; struct blk_plug_cb *cb; struct raid10_plug_cb *plug = NULL; + sector_t sectors; int sectors_handled; int max_sectors; - int sectors; md_write_start(mddev, bio); @@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio) while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio->bi_iter.bi_sector < conf->reshape_progress && bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { - /* IO spans the reshape position. Need to wait for - * reshape to pass + /* + * IO spans the reshape position. Need to wait for reshape to + * pass */ raid10_log(conf->mddev, "wait reshape"); allow_barrier(conf); @@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio) sectors); wait_barrier(conf); } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && - bio_data_dir(bio) == WRITE && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && bio->bi_iter.bi_sector + sectors > conf->reshape_progress) @@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio) conf->reshape_safe = mddev->reshape_position; } - r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); - - r10_bio->master_bio = bio; - r10_bio->sectors = sectors; - - r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_iter.bi_sector; - r10_bio->state = 0; - - /* We might need to issue multiple reads to different - * devices if there are bad blocks around, so we keep - * track of the number of reads in bio->bi_phys_segments. - * If this is 0, there is only one r10_bio and no locking - * will be needed when the request completes. If it is - * non-zero, then it is the number of not-completed requests. - */ - bio->bi_phys_segments = 0; - bio_clear_flag(bio, BIO_SEG_VALID); - - if (rw == READ) { - /* - * read balancing logic: - */ - struct md_rdev *rdev; - int slot; - -read_again: - rdev = read_balance(conf, r10_bio, &max_sectors); - if (!rdev) { - raid_end_bio_io(r10_bio); - return; - } - slot = r10_bio->read_slot; - - read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); - bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, - max_sectors); - - r10_bio->devs[slot].bio = read_bio; - r10_bio->devs[slot].rdev = rdev; - - read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + - choose_data_offset(r10_bio, rdev); - read_bio->bi_bdev = rdev->bdev; - read_bio->bi_end_io = raid10_end_read_request; - bio_set_op_attrs(read_bio, op, do_sync); - if (test_bit(FailFast, &rdev->flags) && - test_bit(R10BIO_FailFast, &r10_bio->state)) - read_bio->bi_opf |= MD_FAILFAST; - read_bio->bi_private = r10_bio; - - if (mddev->gendisk) - trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), - read_bio, disk_devt(mddev->gendisk), - r10_bio->sector); - if (max_sectors < r10_bio->sectors) { - /* Could not read all from this device, so we will - * need another r10_bio. - */ - sectors_handled = (r10_bio->sector + max_sectors - - bio->bi_iter.bi_sector); - r10_bio->sectors = max_sectors; - spin_lock_irq(&conf->device_lock); - if (bio->bi_phys_segments == 0) - bio->bi_phys_segments = 2; - else - bio->bi_phys_segments++; - spin_unlock_irq(&conf->device_lock); - /* Cannot call generic_make_request directly - * as that will be queued in __generic_make_request - * and subsequent mempool_alloc might block - * waiting for it. so hand bio over to raid10d. 
- */ - reschedule_retry(r10_bio); - - r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); - - r10_bio->master_bio = bio; - r10_bio->sectors = bio_sectors(bio) - sectors_handled; - r10_bio->state = 0; - r10_bio->mddev = mddev; - r10_bio->sector = bio->bi_iter.bi_sector + - sectors_handled; - goto read_again; - } else - generic_make_request(read_bio); - return; - } - - /* - * WRITE: - */ if (conf->pending_count >= max_queued_requests) { md_wakeup_thread(mddev->thread); raid10_log(mddev, "wait queued"); @@ -1300,8 +1308,7 @@ retry_write: int bad_sectors; int is_bad; - is_bad = is_badblock(rdev, dev_sector, - max_sectors, + is_bad = is_badblock(rdev, dev_sector, max_sectors, &first_bad, &bad_sectors); if (is_bad < 0) { /* Mustn't write here until the bad block @@ -1405,8 +1412,7 @@ retry_write: r10_bio->devs[i].bio = mbio; mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ - choose_data_offset(r10_bio, - rdev)); + choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); @@ -1457,8 +1463,7 @@ retry_write: r10_bio->devs[i].repl_bio = mbio; mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + - choose_data_offset( - r10_bio, rdev)); + choose_data_offset(r10_bio, rdev)); mbio->bi_bdev = rdev->bdev; mbio->bi_end_io = raid10_end_write_request; bio_set_op_attrs(mbio, op, do_sync | do_fua); @@ -1503,6 +1508,36 @@ retry_write: one_write_done(r10_bio); } +static void __make_request(struct mddev *mddev, struct bio *bio) +{ + struct r10conf *conf = mddev->private; + struct r10bio *r10_bio; + + r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); + + r10_bio->master_bio = bio; + r10_bio->sectors = bio_sectors(bio); + + r10_bio->mddev = mddev; + r10_bio->sector = bio->bi_iter.bi_sector; + r10_bio->state = 0; + + /* + * We might need to issue multiple reads to different devices if there + * are bad blocks around, so we keep track of the number of reads in + * bio->bi_phys_segments. If this is 0, there is only one r10_bio and + * no locking will be needed when the request completes. If it is + * non-zero, then it is the number of not-completed requests. 
+ */ + bio->bi_phys_segments = 0; + bio_clear_flag(bio, BIO_SEG_VALID); + + if (bio_data_dir(bio) == READ) + raid10_read_request(mddev, bio, r10_bio); + else + raid10_write_request(mddev, bio, r10_bio); +} + static void raid10_make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index d7bfb6fc8aef..0e8ed2c327b0 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1682,8 +1682,7 @@ out: static struct stripe_head * r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect, - sector_t log_start) + sector_t stripe_sect) { struct stripe_head *sh; @@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf, return NULL; /* no more stripe available */ r5l_recovery_reset_stripe(sh); - sh->log_start = log_start; return sh; } @@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect, ctx->pos); + conf, stripe_sect); } if (!sh) { pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", @@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, conf->min_nr_stripes * 2); raid5_set_cache_size(mddev, conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe( - conf, stripe_sect, ctx->pos); + sh = r5c_recovery_alloc_stripe(conf, + stripe_sect); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", @@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { r5l_recovery_replay_one_stripe(conf, sh, ctx); - sh->log_start = ctx->pos; list_move_tail(&sh->lru, cached_stripe_list); } r5l_recovery_load_data(log, sh, ctx, payload, @@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log, set_bit(R5_UPTODATE, &dev->flags); } } - list_add_tail(&sh->r5c, &log->stripe_in_journal_list); - atomic_inc(&log->stripe_in_journal_count); } /* @@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, struct stripe_head *sh, *next; struct mddev *mddev = log->rdev->mddev; struct page *page; + sector_t next_checkpoint = MaxSector; page = alloc_page(GFP_KERNEL); if (!page) { @@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, return -ENOMEM; } + WARN_ON(list_empty(&ctx->cached_list)); + list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { struct r5l_meta_block *mb; int i; @@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_WRITE, REQ_FUA, false); sh->log_start = ctx->pos; + list_add_tail(&sh->r5c, &log->stripe_in_journal_list); + atomic_inc(&log->stripe_in_journal_count); ctx->pos = write_pos; ctx->seq += 1; - + next_checkpoint = sh->log_start; list_del_init(&sh->lru); raid5_release_stripe(sh); } + log->next_checkpoint = next_checkpoint; __free_page(page); return 0; } @@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log) struct r5l_recovery_ctx ctx; int ret; sector_t pos; - struct stripe_head *sh; ctx.pos = log->last_checkpoint; ctx.seq = log->last_cp_seq; @@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log) log->next_checkpoint = ctx.pos; r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++); ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); - } else { - sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru); - log->next_checkpoint = sh->log_start; } if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) pr_debug("md/raid:%s: starting from clean shutdown\n", mdname(mddev)); else { - pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n", + pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", mdname(mddev), ctx.data_only_stripes, ctx.data_parity_stripes); @@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf, if (do_wakeup) wake_up(&conf->wait_for_overlap); - if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) - return; - spin_lock_irq(&conf->log->stripe_in_journal_lock); list_del_init(&sh->r5c); spin_unlock_irq(&conf->log->stripe_in_journal_lock); @@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) spin_lock_init(&log->stripe_in_journal_lock); atomic_set(&log->stripe_in_journal_count, 0); + rcu_assign_pointer(conf->log, log); + if (r5l_load_log(log)) goto error; - rcu_assign_pointer(conf->log, log); set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); return 0; error: + rcu_assign_pointer(conf->log, NULL); md_unregister_thread(&log->reclaim_thread); reclaim_thread: mempool_destroy(log->meta_pool); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 06d7279bdd04..36c13e4be9c9 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -62,6 +62,8 @@ #include "raid0.h" #include "bitmap.h" +#define 
UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED) + #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE @@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev) mddev->new_chunk_sectors = chunksect; ret = setup_conf(mddev); - if (!IS_ERR_VALUE(ret)) - clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + if (!IS_ERR(ret)) + mddev_clear_unsupported_flags(mddev, + UNSUPPORTED_MDDEV_FLAGS); return ret; } diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index a0547dbf9806..76382c858c35 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card, struct ms_id_register id_reg; if (!(*mrq)) { - memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, + memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, sizeof(struct ms_id_register)); *mrq = &card->current_mrq; return 0; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 18e05ca7584f..3600c9993a98 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; + if (!cldev->bus->hbm_f_os_supported) + return; + ret = mei_cldev_enable(cldev); if (ret) return; diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index c6c051b52f55..c6217a4993ad 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, dev->hbm_f_ev_supported); pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n", dev->hbm_f_fa_supported); + pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n", + dev->hbm_f_os_supported); } pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index dd7f15a65eed..25b4a1ba522d 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev) /* Fixed Address Client Support */ if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) dev->hbm_f_fa_supported = 1; + + /* OS ver message Support */ + if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) + dev->hbm_f_os_supported = 1; } /** diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 9daf3f9aed25..e1e4d47d4d7d 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -76,6 +76,12 @@ #define HBM_MINOR_VERSION_FA 0 #define HBM_MAJOR_VERSION_FA 2 +/* + * MEI version with OS ver message support + */ +#define HBM_MINOR_VERSION_OS 0 +#define HBM_MAJOR_VERSION_OS 2 + /* Host bus message command opcode */ #define MEI_HBM_CMD_OP_MSK 0x7f /* Host bus message command RESPONSE */ diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 699693cd8c59..8dadb98662a9 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state); * @hbm_f_ev_supported : hbm feature event notification * @hbm_f_fa_supported : hbm feature fixed address client * @hbm_f_ie_supported : hbm feature immediate reply to enum request + * @hbm_f_os_supported : hbm feature support OS ver message * * @me_clients_rwsem: rw lock over me_clients list * @me_clients : list of FW clients @@ -487,6 +488,7 @@ struct mei_device { unsigned int hbm_f_ev_supported:1; unsigned int hbm_f_fa_supported:1; unsigned int hbm_f_ie_supported:1; + unsigned int 
hbm_f_os_supported:1; struct rw_semaphore me_clients_rwsem; struct list_head me_clients; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index b11c3455b040..e6ea8503f40c 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, } } while (busy); - if (host->ops->card_busy && send_status) - return mmc_switch_status(card); - return 0; } @@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (!use_busy_signal) goto out; - /* Switch to new timing before poll and check switch status. */ - if (timing) - mmc_set_timing(host, timing); - /*If SPI or used HW busy detection above, then we don't need to poll. */ if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || - mmc_host_is_spi(host)) { - if (send_status) - err = mmc_switch_status(card); + mmc_host_is_spi(host)) goto out_tim; - } /* Let's try to poll to find out when the command is completed. */ err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); + if (err) + goto out; out_tim: - if (err && timing) - mmc_set_timing(host, old_timing); + /* Switch to new timing before check switch status. */ + if (timing) + mmc_set_timing(host, timing); + + if (send_status) { + err = mmc_switch_status(card); + if (err && timing) + mmc_set_timing(host, old_timing); + } out: mmc_retune_release(host); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b352760c041e..09739352834c 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id) { struct meson_host *host = dev_id; struct mmc_request *mrq; - struct mmc_command *cmd = host->cmd; + struct mmc_command *cmd; u32 irq_en, status, raw_status; irqreturn_t ret = IRQ_HANDLED; if (WARN_ON(!host)) return IRQ_NONE; + cmd = host->cmd; + mrq = host->mrq; if (WARN_ON(!mrq)) @@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) int ret = IRQ_HANDLED; if (WARN_ON(!mrq)) - ret = IRQ_NONE; + return IRQ_NONE; if (WARN_ON(!cmd)) - ret = IRQ_NONE; + return IRQ_NONE; data = cmd->data; if (data) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 44ecebd1ea8c..c8b8ac66ff7e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); cmd1 = cmd->arg; + if (cmd->opcode == MMC_STOP_TRANSMISSION) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; @@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) ssp->base + HW_SSP_BLOCK_SIZE); } - if ((cmd->opcode == MMC_STOP_TRANSMISSION) || - (cmd->opcode == SD_IO_RW_EXTENDED)) + if (cmd->opcode == SD_IO_RW_EXTENDED) cmd0 |= BM_SSP_CMD0_APPEND_8CYC; cmd1 = cmd->arg; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 160f695cc09c..278a5a435ab7 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev) /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); list_for_each_entry(child, &device->children, node) - acpi_device_fix_up_power(child); + if (child->status.present && child->status.enabled) + 
acpi_device_fix_up_power(child); if (acpi_bus_get_status(device) || !device->status.present) return -ENODEV; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 353a9ddf6b97..9ce5dcb4abd0 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -426,6 +426,7 @@ config MTD_NAND_ORION config MTD_NAND_OXNAS tristate "NAND Flash support for Oxford Semiconductor SoC" + depends on HAS_IOMEM help This enables the NAND flash controller on Oxford Semiconductor SoCs. @@ -540,7 +541,7 @@ config MTD_NAND_FSMC Flexible Static Memory Controller (FSMC) config MTD_NAND_XWAY - tristate "Support for NAND on Lantiq XWAY SoC" + bool "Support for NAND on Lantiq XWAY SoC" depends on LANTIQ && SOC_TYPE_XWAY help Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c index 5553a5d9efd1..846a66c1b133 100644 --- a/drivers/mtd/nand/lpc32xx_mlc.c +++ b/drivers/mtd/nand/lpc32xx_mlc.c @@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) init_completion(&host->comp_controller); host->irq = platform_get_irq(pdev, 0); - if ((host->irq < 0) || (host->irq >= NR_IRQS)) { + if (host->irq < 0) { dev_err(&pdev->dev, "failed to get platform irq\n"); res = -EINVAL; goto err_exit3; diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 28c7f474be77..4a5e948c62df 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev) if (IS_ERR(nfc->pbus_base)) return PTR_ERR(nfc->pbus_base); + writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); + clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); - nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox"); + nfc->chan = dma_request_chan(&pdev->dev, "rxtx"); if (IS_ERR(nfc->chan)) return PTR_ERR(nfc->chan); diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 1f2948c0c458..895101a5e686 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c @@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = { { .compatible = "lantiq,nand-xway" }, {}, }; -MODULE_DEVICE_TABLE(of, xway_nand_match); static struct platform_driver xway_nand_driver = { .probe = xway_nand_probe, @@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = { }, }; -module_platform_driver(xway_nand_driver); - -MODULE_LICENSE("GPL"); +builtin_platform_driver(xway_nand_driver); diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index b8c293373ecc..a306de4318d7 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) */ static int ipddp_create(struct ipddp_route *new_rt) { - struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); + struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); if (rt == NULL) return -ENOMEM; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9ec33b51a0ed..2ce7ae97ac91 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum) if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0); else - return mdiobus_read(priv->master_mii_bus, addr, regnum); + return mdiobus_read_nested(priv->master_mii_bus, addr, 
regnum); } static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, @@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); else - mdiobus_write(priv->master_mii_bus, addr, regnum, val); + mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val); return 0; } @@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct device_node *dn = pdev->dev.of_node; struct b53_platform_data *pdata; + struct dsa_switch_ops *ops; struct bcm_sf2_priv *priv; struct b53_device *dev; struct dsa_switch *ds; @@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; + ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; + dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv); if (!dev) return -ENOMEM; @@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) ds = dev->ds; /* Override the parts that are non-standard wrt. normal b53 devices */ + memcpy(ops, ds->ops, sizeof(*ops)); + ds->ops = ops; ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; ds->ops->setup = bcm_sf2_sw_setup; ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 155190db682d..9943629fcbf9 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) } } +isr_done: /* If there is not a separate AN irq, handle it here */ if (pdata->dev_irq == pdata->an_irq) pdata->phy_if.an_isr(irq, pdata); @@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data) if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq)) pdata->i2c_if.i2c_isr(irq, pdata); -isr_done: return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 7e8cf213fd81..744ed6ddaf37 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct bcm_sysport_cb *cb; - struct netdev_queue *txq; u32 hw_ind; - txq = netdev_get_tx_queue(ndev, ring->index); - /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; @@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, ring->c_index = c_index; - if (netif_tx_queue_stopped(txq) && pkts_compl) - netif_tx_wake_queue(txq); - netif_dbg(priv, tx_done, ndev, "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", ring->index, ring->c_index, pkts_compl, bytes_compl); @@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { + struct netdev_queue *txq; unsigned int released; unsigned long flags; + txq = netdev_get_tx_queue(priv->netdev, ring->index); + spin_lock_irqsave(&ring->lock, flags); released = 
__bcm_sysport_tx_reclaim(priv, ring); + if (released) + netif_tx_wake_queue(txq); + spin_unlock_irqrestore(&ring->lock, flags); return released; } +/* Locked version of the per-ring TX reclaim, but does not wake the queue */ +static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv, + struct bcm_sysport_tx_ring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + __bcm_sysport_tx_reclaim(priv, ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) { struct bcm_sysport_tx_ring *ring = @@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, napi_disable(&ring->napi); netif_napi_del(&ring->napi); - bcm_sysport_tx_reclaim(priv, ring); + bcm_sysport_tx_clean(priv, ring); kfree(ring->cbs); ring->cbs = NULL; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 185e9e047aa9..ae42de4fdddf 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); + /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ + tg3_full_lock(tp, 0); if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } + tg3_full_unlock(tp); } /* diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9211c750e064..2f85b64f01fa 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -47,8 +47,9 @@ struct lmac { struct bgx { u8 bgx_id; struct lmac lmac[MAX_LMAC_PER_BGX]; - int lmac_count; + u8 lmac_count; u8 max_lmac; + u8 acpi_lmac_idx; void __iomem *reg_base; struct pci_dev *pdev; bool is_dlm; @@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; + bgx->acpi_lmac_idx++; /* move to next LMAC */ out: - bgx->lmac_count++; return AE_OK; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 0e74529a4209..30e855004c57 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: mutex_unlock(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 225e9a4877d7..1a7f8ad7b9c6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac) /* Check if mac has already been added as part of uc-list */ for (i = 0; i < adapter->uc_macs; i++) { - if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN], - mac)) { + if 
(ether_addr_equal(adapter->uc_list[i].mac, mac)) { /* mac already added, skip addition */ adapter->pmac_id[0] = adapter->pmac_id[i + 1]; return 0; @@ -319,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0; + /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC + * address + */ + if (BEx_chip(adapter) && be_virtfn(adapter) && + !check_privilege(adapter, BE_PRIV_FILTMGMT)) + return -EPERM; + /* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done; @@ -1655,14 +1661,12 @@ static void be_clear_mc_list(struct be_adapter *adapter) static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx) { - if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], - adapter->dev_mac)) { + if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) { adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0]; return 0; } - return be_cmd_pmac_add(adapter, - (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], + return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac, adapter->if_handle, &adapter->pmac_id[uc_idx + 1], 0); } @@ -1698,9 +1702,8 @@ static void be_set_uc_list(struct be_adapter *adapter) } if (adapter->update_uc_list) { - i = 1; /* First slot is claimed by the Primary MAC */ - /* cache the uc-list in adapter array */ + i = 0; netdev_for_each_uc_addr(ha, netdev) { ether_addr_copy(adapter->uc_list[i].mac, ha->addr); i++; @@ -3613,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) static void be_disable_if_filters(struct be_adapter *adapter) { - be_dev_mac_del(adapter, adapter->pmac_id[0]); + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) + be_dev_mac_del(adapter, adapter->pmac_id[0]); + be_clear_uc_list(adapter); be_clear_mc_list(adapter); @@ -3766,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status; - /* For BE3 VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) { status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a761001308dc..1515abaa5ac9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring) PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - __page_frag_drain(buffer_info->page, 0, - buffer_info->pagecnt_bias); + __page_frag_cache_drain(buffer_info->page, + buffer_info->pagecnt_bias); buffer_info->page = NULL; } @@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - __page_frag_drain(page, 0, rx_buffer->pagecnt_bias); + __page_frag_cache_drain(page, rx_buffer->pagecnt_bias); } /* clear contents of rx_buffer */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index a849da92f857..6b8635378f1f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -101,13 +101,19 @@ void 
mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) { struct mlx4_cq *cq; + rcu_read_lock(); cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, cqn & (dev->caps.num_cqs - 1)); + rcu_read_unlock(); + if (!cq) { mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ ++cq->arm_sn; cq->comp(cq); @@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) { struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; struct mlx4_cq *cq; - spin_lock(&cq_table->lock); - + rcu_read_lock(); cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); - if (cq) - atomic_inc(&cq->refcount); - - spin_unlock(&cq_table->lock); + rcu_read_unlock(); if (!cq) { - mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn); return; } + /* Accessing the CQ outside of rcu_read_lock is safe, because + * the CQ is freed only after interrupt handling is completed. + */ cq->event(cq, event_type); - - if (atomic_dec_and_test(&cq->refcount)) - complete(&cq->free); } static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, @@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, if (err) return err; - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); if (err) goto err_icm; @@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, return 0; err_radix: - spin_lock_irq(&cq_table->lock); + spin_lock(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); + spin_unlock(&cq_table->lock); err_icm: mlx4_cq_free_icm(dev, cq->cqn); @@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + spin_lock(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock(&cq_table->lock); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != priv->eq_table.eq[MLX4_EQ_ASYNC].irq) synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); - spin_lock_irq(&cq_table->lock); - radix_tree_delete(&cq_table->tree, cq->cqn); - spin_unlock_irq(&cq_table->lock); - if (atomic_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index edbe200ac2fa..761f8b12399c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev) /* Process all completions, if any exist, to prevent * the queues freezing if they are full */ - for (i = 0; i < priv->rx_ring_num; i++) + for (i = 0; i < priv->rx_ring_num; i++) { + local_bh_disable(); napi_schedule(&priv->rx_cq[i]->napi); + local_bh_enable(); + } netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -2277,7 +2280,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (priv->tx_ring_num[TX_XDP] && !mlx4_en_check_xdp_mtu(dev, new_mtu)) - return -ENOTSUPP; + return -EOPNOTSUPP; dev->mtu = new_mtu; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c 
index cd3638e6fe25..0509996957d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 56185a0b827d..1822382212ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, put_res(dev, slave, srqn, RES_SRQ); qp->srq = srq; } + + /* Save param3 for dynamic changes from VST back to VGT */ + qp->param3 = qpc->param3; put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, mtt_base, RES_MTT); res_end_move(dev, slave, RES_QP, qpn); @@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; u8 orig_sched_queue; - __be32 orig_param3 = qpc->param3; u8 orig_vlan_control = qpc->pri_path.vlan_control; u8 orig_fvl_rx = qpc->pri_path.fvl_rx; u8 orig_pri_path_fl = qpc->pri_path.fl; @@ -3814,7 +3816,6 @@ out: */ if (!err) { qp->sched_queue = orig_sched_queue; - qp->param3 = orig_param3; qp->vlan_control = orig_vlan_control; qp->fvl_rx = orig_fvl_rx; qp->pri_path_fl = orig_pri_path_fl; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1236b27b1493..2b7dd315020c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3675,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - mlx5e_vxlan_cleanup(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) - mlx5_eswitch_unregister_vport_rep(esw, 0); - if (priv->xdp_prog) bpf_prog_put(priv->xdp_prog); } @@ -3807,9 +3801,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) static void mlx5e_nic_disable(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + queue_work(priv->wq, &priv->set_rx_mode_work); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + 
mlx5_eswitch_unregister_vport_rep(esw, 0); mlx5e_disable_async_events(priv); - mlx5_lag_remove(priv->mdev); + mlx5_lag_remove(mdev); } static const struct mlx5e_profile mlx5e_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 1fffe48a93cc..cbfac06b7ffd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n"); return true; case MLX5E_AM_GOING_RIGHT: return (am->steps_left > 1) && (am->steps_right == 1); @@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_turn: PARKING\n"); break; case MLX5E_AM_GOING_RIGHT: am->tune_state = MLX5E_AM_GOING_LEFT; @@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am) switch (am->tune_state) { case MLX5E_AM_PARKING_ON_TOP: case MLX5E_AM_PARKING_TIRED: - WARN_ONCE(true, "mlx5e_am_step: PARKING\n"); break; case MLX5E_AM_GOING_RIGHT: if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) @@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, u32 delta_us = ktime_us_delta(end->time, start->time); unsigned int npkts = end->pkt_ctr - start->pkt_ctr; - if (!delta_us) { - WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n"); + if (!delta_us) return; - } curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index f8829b517156..46bef6a26a8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, } } +/* we also get here when setting the rule to the FW failed, etc. It means that the + * flow rule itself might not exist, but some offloading related to the actions + * should be cleaned. 
+ */ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule); - - mlx5_del_flow_rules(flow->rule); + if (!IS_ERR(flow->rule)) { + counter = mlx5_flow_rule_counter(flow->rule); + mlx5_del_flow_rules(flow->rule); + mlx5_fc_destroy(priv->mdev, counter); + } if (esw && esw->mode == SRIOV_OFFLOADS) { mlx5_eswitch_del_vlan_action(esw, flow->attr); @@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, mlx5e_detach_encap(priv, flow); } - mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; @@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); + struct flow_dissector_key_control *enc_control = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + f->key); + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { struct flow_dissector_key_ports *key = skb_flow_dissector_target(f->dissector, @@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, /* Full udp dst port must be given */ if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) - return -EOPNOTSUPP; - - /* udp src port isn't supported */ - if (memchr_inv(&mask->src, 0, sizeof(mask->src))) - return -EOPNOTSUPP; + goto vxlan_match_offload_err; if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); - else + else { + netdev_warn(priv->netdev, + "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); return -EOPNOTSUPP; + } MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_sport, ntohs(key->src)); } else { /* udp dst port must be given */ - return -EOPNOTSUPP; +vxlan_match_offload_err: + netdev_warn(priv->netdev, + "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); + return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_dissector_key_ipv4_addrs *key = skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, @@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, ntohl(key->dst)); - } - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); + } /* Enforce DMAC when offloading incoming tunneled flows. * Flow counters require a match on the DMAC. 
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (parse_tunnel_attr(priv, spec, f)) return -EOPNOTSUPP; break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + netdev_warn(priv->netdev, + "IPv6 tunnel decap offload isn't supported\n"); default: return -EOPNOTSUPP; } @@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, key->flags & FLOW_DIS_IS_FRAGMENT); + + /* the HW doesn't need L3 inline to match on frag=no */ + if (key->flags & FLOW_DIS_IS_FRAGMENT) + *min_inline = MLX5_INLINE_MODE_IP; } } @@ -646,18 +668,18 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, int ttl; #if IS_ENABLED(CONFIG_INET) + int ret; + rt = ip_route_output_key(dev_net(mirred_dev), fl4); - if (IS_ERR(rt)) { - pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr); - return -EOPNOTSUPP; - } + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; #else return -EOPNOTSUPP; #endif if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { - pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n", - __func__); + pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__); ip_rt_put(rt); return -EOPNOTSUPP; } @@ -718,8 +740,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, struct net_device **out_dev) { int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct neighbour *n = NULL; struct flowi4 fl4 = {}; - struct neighbour *n; char *encap_header; int encap_size; __be32 saddr; @@ -750,7 +772,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, e->out_dev = *out_dev; if (!(n->nud_state & NUD_VALID)) { - err = -ENOTSUPP; + pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr); + err = -EOPNOTSUPP; goto out; } @@ -772,6 +795,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, encap_size, encap_header, &e->encap_id); out: + if (err && n) + neigh_release(n); kfree(encap_header); return err; } @@ -792,9 +817,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, int tunnel_type; int err; - /* udp dst port must be given */ + /* udp dst port must be set */ if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) + goto vxlan_encap_offload_err; + + /* setting udp src port isn't supported */ + if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { +vxlan_encap_offload_err: + netdev_warn(priv->netdev, + "must set udp dst port and not set udp src port\n"); return -EOPNOTSUPP; + } if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { @@ -802,6 +835,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, info.tun_id = tunnel_id_to_key32(key->tun_id); tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { + netdev_warn(priv->netdev, + "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); return -EOPNOTSUPP; } @@ -809,6 +844,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, case AF_INET: info.daddr = key->u.ipv4.dst; break; + case AF_INET6: + netdev_warn(priv->netdev, + "IPv6 tunnel encap offload isn't supported\n"); default: return -EOPNOTSUPP; } @@ -986,7 +1024,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, if (IS_ERR(flow->rule)) { err = PTR_ERR(flow->rule); - goto err_free; + goto err_del_rule; } err = rhashtable_insert_fast(&tc->ht, &flow->node, @@ -997,7 +1035,7 @@ int 
mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, goto out; err_del_rule: - mlx5_del_flow_rules(flow->rule); + mlx5e_tc_del_flow(priv, flow); err_free: kfree(flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6547f22e6b9b..d01e9f21d469 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1195,7 +1195,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, { int err = 0; - mlx5_drain_health_wq(dev); + if (cleanup) + mlx5_drain_health_wq(dev); mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { @@ -1359,9 +1360,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, mlx5_enter_error_state(dev); mlx5_unload_one(dev, priv, false); - /* In case of kernel call save the pci state */ + /* In case of kernel call save the pci state and drain the health wq */ if (state) { pci_save_state(pdev); + mlx5_drain_health_wq(dev); mlx5_pci_disable_device(dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index d147ddd97997..0af3338bfcb4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1); /* pci_eqe_cmd_token * Command completion event - token */ -MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); +MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16); /* pci_eqe_cmd_status * Command completion event - status */ -MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); +MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8); /* pci_eqe_cmd_out_param_h * Command completion event - output parameter - higher part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32); /* pci_eqe_cmd_out_param_l * Command completion event - output parameter - lower part */ -MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); +MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d768c7b6c6d6..003093abb170 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } if (eth_skb_pad(skb)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 150ccf5192a9..2e88115e8735 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } + dev_consume_skb_any(skb_orig); } mlxsw_sx_txhdr_construct(skb, &tx_info); /* TX header is consumed by HW on the way so we shouldn't count its diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 99a14df28b96..2851b4c56570 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) else adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); + /* of_phy_find_device() claims a 
reference to the phydev, + * so we do that here manually as well. When the driver + * later unloads, it can unilaterally drop the reference + * without worrying about ACPI vs DT. + */ + if (adpt->phydev) + get_device(&adpt->phydev->mdio.dev); } else { struct device_node *phy_np; diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 422289c232bc..f46d300bd585 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev) err_undo_napi: netif_napi_del(&adpt->rx_q.napi); err_undo_mdiobus: - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); err_undo_clocks: emac_clks_teardown(adpt); @@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev) emac_clks_teardown(adpt); - if (!has_acpi_companion(&pdev->dev)) - put_device(&adpt->phydev->mdio.dev); + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); free_netdev(netdev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 44389c90056a..8f1623bf2134 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -696,7 +696,7 @@ enum rtl_tx_desc_bit_1 { enum rtl_rx_desc_bit { /* Rx private */ PID1 = (1 << 18), /* Protocol ID bit 1/2 */ - PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + PID0 = (1 << 17), /* Protocol ID bit 0/2 */ #define RxProtoUDP (PID1) #define RxProtoTCP (PID0) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 92d7692c840d..89ac1e3f6175 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Receive error message handling */ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { + if (priv->rx_over_errors != ndev->stats.rx_over_errors) ndev->stats.rx_over_errors = priv->rx_over_errors; - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); - } - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); - } out: return budget - quota; } @@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + entry / NUM_TX_DESC * DPTR_ALIGN; len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + /* Zero length DMA descriptors are problematic as they seem to + * terminate DMA transfers. Avoid them by simply using a length of + * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of + * data by the call to skb_put_padto() above this is safe with + * respect to both the length of the first DMA descriptor (len) + * overflowing the available data and the length of the second DMA + * descriptor (skb->len - len) being negative. 
+ */ + if (len == 0) + len = DPTR_ALIGN; + memcpy(buffer, skb->data, len); dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, dma_addr)) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 00fafabab1d0..f729a6b43958 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, + .hw_crc = 1, .tsu = 1, .select_mii = 1, .shift_rd0 = 1, @@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | @@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = { .ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, - .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 39eb7a65bb9f..e3f6389e1b01 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device, ndev->max_mtu = JUMBO_LEN; else ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); - if (priv->plat->maxmtu < ndev->max_mtu) + /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, + * or if plat->maxmtu < ndev->min_mtu, which is an invalid range. 
+ */ + if ((priv->plat->maxmtu < ndev->max_mtu) && + (priv->plat->maxmtu >= ndev->min_mtu)) ndev->max_mtu = priv->plat->maxmtu; + else if (priv->plat->maxmtu < ndev->min_mtu) + dev_warn(priv->device, + "%s: warning: maxmtu having invalid value (%d)\n", + __func__, priv->plat->maxmtu); if (flow_ctrl) priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ @@ -3332,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device, */ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { priv->use_riwt = 1; - netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); + dev_info(priv->device, + "Enable RX Mitigation via HW Watchdog Timer\n"); } netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); @@ -3358,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device, /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { - netdev_err(priv->dev, - "%s: MDIO bus (id: %d) registration failed", - __func__, priv->plat->bus_id); + dev_err(priv->device, + "%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); goto error_mdio_register; } } ret = register_netdev(ndev); if (ret) { - netdev_err(priv->dev, "%s: ERROR %i registering the device\n", - __func__, ret); + dev_err(priv->device, "%s: ERROR %i registering the device\n", + __func__, ret); goto error_netdev_register; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index a2831773431a..3da4737620cb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat) /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; + + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; } static int quark_default_data(struct plat_stmmacenet_data *plat, @@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat, /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; + /* Set the maxmtu to a default of JUMBO_LEN */ + plat->maxmtu = JUMBO_LEN; + return 0; } diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 77c88fcf2b86..9b8a30bf939b 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1210,7 +1210,7 @@ int cpmac_init(void) goto fail_alloc; } -#warning FIXME: unhardcode gpio&reset bits + /* FIXME: unhardcode gpio&reset bits */ ar7_gpio_disable(26); ar7_gpio_disable(27); ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c9414c054852..fcab8019dda0 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, * policy filters on the host). Deliver these via the VF * interface in the guest. */ + rcu_read_lock(); vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); if (vf_netdev && (vf_netdev->flags & IFF_UP)) net = vf_netdev; @@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); if (unlikely(!skb)) { ++net->stats.rx_dropped; + rcu_read_unlock(); return NVSP_STAT_FAIL; } @@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj, * TODO - use NAPI? 
*/ netif_rx(skb); + rcu_read_unlock(); return 0; } diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 46d53a6c8cf8..76ba7ecfe142 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi) /* Reset */ if (gpio_is_valid(rstn)) { udelay(1); - gpio_set_value(rstn, 0); + gpio_set_value_cansleep(rstn, 0); udelay(1); - gpio_set_value(rstn, 1); + gpio_set_value_cansleep(rstn, 1); usleep_range(120, 240); } diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 1253f864737a..ef688518ad77 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg) { struct usb_device *usb_dev = atusb->usb_dev; int ret; + uint8_t *buffer; uint8_t value; + buffer = kmalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, - 0, reg, &value, 1, 1000); - return ret >= 0 ? value : ret; + 0, reg, buffer, 1, 1000); + + if (ret >= 0) { + value = buffer[0]; + kfree(buffer); + return value; + } else { + kfree(buffer); + return ret; + } } static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, @@ -549,13 +562,6 @@ static int atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct atusb *atusb = hw->priv; - struct device *dev = &atusb->usb_dev->dev; - - if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) { - dev_info(dev, "Automatic frame retransmission is only available from " - "firmware version 0.3. Please update if you want this feature."); - return -EINVAL; - } return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); } @@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = { static int atusb_get_and_show_revision(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[3]; + unsigned char *buffer; int ret; + buffer = kmalloc(3, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Get a couple of the ATMega Firmware values */ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, @@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb) dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); } + kfree(buffer); return ret; } static int atusb_get_and_show_build(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - char build[ATUSB_BUILD_SIZE + 1]; + char *build; int ret; + build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL); + if (!build) + return -ENOMEM; + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); @@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb) dev_info(&usb_dev->dev, "Firmware: build %s\n", build); } + kfree(build); return ret; } @@ -698,7 +714,7 @@ fail: static int atusb_set_extended_addr(struct atusb *atusb) { struct usb_device *usb_dev = atusb->usb_dev; - unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; + unsigned char *buffer; __le64 extended_addr; u64 addr; int ret; @@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb) return 0; } + buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + /* Firmware is new enough so we fetch the address from EEPROM 
*/ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000); - if (ret < 0) - dev_err(&usb_dev->dev, "failed to fetch extended address\n"); + if (ret < 0) { + dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n"); + ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr); + kfree(buffer); + return ret; + } memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); /* Check if read address is not empty and the unicast bit is set correctly */ @@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb) &addr); } + kfree(buffer); return ret; } @@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface, hw->parent = &usb_dev->dev; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | - IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS | - IEEE802154_HW_FRAME_RETRIES; + IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS; hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; @@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface, atusb_get_and_show_build(atusb); atusb_set_extended_addr(atusb); + if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3) + hw->flags |= IEEE802154_HW_FRAME_RETRIES; + ret = atusb_get_and_clear_error(atusb); if (ret) { dev_err(&atusb->usb_dev->dev, diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index d361835b315d..8dbd59baa34d 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -279,6 +279,7 @@ config MARVELL_PHY config MESON_GXL_PHY tristate "Amlogic Meson GXL Internal PHY" + depends on ARCH_MESON || COMPILE_TEST ---help--- Currently has a driver for the Amlogic Meson GXL Internal PHY diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 1b639242f9e2..ca1b462bf7b2 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -29,6 +29,7 @@ #define MII_DP83867_MICR 0x12 #define MII_DP83867_ISR 0x13 #define DP83867_CTRL 0x1f +#define DP83867_CFG3 0x1e /* Extended Registers */ #define DP83867_RGMIICTL 0x0032 @@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev) micr_status |= (MII_DP83867_MICR_AN_ERR_INT_EN | MII_DP83867_MICR_SPEED_CHNG_INT_EN | + MII_DP83867_MICR_AUTONEG_COMP_INT_EN | + MII_DP83867_MICR_LINK_STS_CHNG_INT_EN | MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN | MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN); @@ -129,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)) return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; return of_property_read_u32(of_node, "ti,fifo-depth", @@ -214,6 +221,13 @@ static int dp83867_config_init(struct phy_device *phydev) } } + /* Enable Interrupt output INT_OE in CFG3 register */ + if (phy_interrupt_is_valid(phydev)) { + val = phy_read(phydev, DP83867_CFG3); + val |= BIT(7); + phy_write(phydev, DP83867_CFG3, val); + } + return 0; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e269262471a4..0b78210c0fa7 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ 
-1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE) { + if (phydev->supported & SUPPORTED_FIBRE && + phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); if (err < 0) goto error; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 25f93a98863b..48da6e93c3f7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1065,6 +1065,15 @@ void phy_state_machine(struct work_struct *work) if (old_link != phydev->link) phydev->state = PHY_CHANGELINK; } + /* + * Failsafe: check that nobody set phydev->link=0 between two + * poll cycles, otherwise we won't leave RUNNING state as long + * as link remains down. + */ + if (!phydev->link && phydev->state == PHY_RUNNING) { + phydev->state = PHY_CHANGELINK; + phydev_err(phydev, "no link in PHY_RUNNING\n"); + } break; case PHY_CHANGELINK: err = phy_read_status(phydev); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7dc61228c55b..f3b48ad90865 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; - if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) + if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); @@ -3576,39 +3576,87 @@ static bool delay_autosuspend(struct r8152 *tp) return false; } -static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) +static int rtl8152_runtime_suspend(struct r8152 *tp) { - struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev = tp->netdev; int ret = 0; - mutex_lock(&tp->control); + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { + u32 rcr = 0; - if (PMSG_IS_AUTO(message)) { - if (netif_running(netdev) && delay_autosuspend(tp)) { + if (delay_autosuspend(tp)) { ret = -EBUSY; goto out1; } - set_bit(SELECTIVE_SUSPEND, &tp->flags); - } else { - netif_device_detach(netdev); + if (netif_carrier_ok(netdev)) { + u32 ocp_data; + + rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data = rcr & ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + rxdy_gated_en(tp, true); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, + PLA_OOB_CTRL); + if (!(ocp_data & RXFIFO_EMPTY)) { + rxdy_gated_en(tp, false); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); + ret = -EBUSY; + goto out1; + } + } + + clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); + + tp->rtl_ops.autosuspend_en(tp, true); + + if (netif_carrier_ok(netdev)) { + napi_disable(&tp->napi); + rtl_stop_rx(tp); + rxdy_gated_en(tp, false); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); + napi_enable(&tp->napi); + } } + set_bit(SELECTIVE_SUSPEND, &tp->flags); + +out1: + return ret; +} + +static int rtl8152_system_suspend(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + int ret = 0; + + netif_device_detach(netdev); + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); napi_disable(&tp->napi); - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - rtl_stop_rx(tp); - tp->rtl_ops.autosuspend_en(tp, true); - } else { - cancel_delayed_work_sync(&tp->schedule); - tp->rtl_ops.down(tp); - } + cancel_delayed_work_sync(&tp->schedule); + tp->rtl_ops.down(tp); napi_enable(&tp->napi); } -out1: + + return ret; +} 
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct r8152 *tp = usb_get_intfdata(intf); + int ret; + + mutex_lock(&tp->control); + + if (PMSG_IS_AUTO(message)) + ret = rtl8152_runtime_suspend(tp); + else + ret = rtl8152_system_suspend(tp); + mutex_unlock(&tp->control); return ret; @@ -4308,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf, NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + if (tp->version == RTL_VER_01) { + netdev->features &= ~NETIF_F_RXCSUM; + netdev->hw_features &= ~NETIF_F_RXCSUM; + } + netdev->ethtool_ops = &ops; netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 23dfb0eac098..454f907d419a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, + .flowi4_proto = ip4h->protocol, .daddr = ip4h->daddr, + .saddr = ip4h->saddr, }; struct net *net = dev_net(vrf_dev); struct rtable *rt; @@ -1250,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); + if (vrf->tb_id == RT_TABLE_UNSPEC) + return -EINVAL; dev->priv_flags |= IFF_L3MDEV_MASTER; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bb70dd5723b5..ca7196c40060 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, struct vxlan_sock *sock4, struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr, + __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { @@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; fl4.saddr = *saddr; + fl4.fl4_dport = dport; + fl4.fl4_sport = sport; rt = ip_route_output_key(vxlan->net, &fl4); if (likely(!IS_ERR(rt))) { @@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, __be32 label, const struct in6_addr *daddr, struct in6_addr *saddr, + __be16 dport, __be16 sport, struct dst_cache *dst_cache, const struct ip_tunnel_info *info) { @@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_dport = dport; + fl6.fl6_sport = sport; err = ipv6_stub->ipv6_dst_lookup(vxlan->net, sock6->sock->sk, @@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, &src->sin.sin_addr.s_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(rt)) { err = PTR_ERR(rt); @@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rdst ? 
rdst->remote_ifindex : 0, tos, label, &dst->sin6.sin6_addr, &src->sin6.sin6_addr, + dst_port, src_port, dst_cache, info); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); @@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, info->key.u.ipv4.dst, - &info->key.u.ipv4.src, NULL, info); + &info->key.u.ipv4.src, dport, sport, NULL, info); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, info->key.label, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, NULL, info); + &info->key.u.ipv6.src, dport, sport, NULL, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c index bc7397d709d3..08bc7822f820 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.c +++ b/drivers/net/wireless/intersil/orinoco/mic.c @@ -16,7 +16,7 @@ /********************************************************************/ int orinoco_mic_init(struct orinoco_private *priv) { - priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " @@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv) return -ENOMEM; } - priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, + priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_mic)) { printk(KERN_DEBUG "orinoco_mic_init: could not allocate " @@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv) void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) - crypto_free_ahash(priv->tx_tfm_mic); + crypto_free_shash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) - crypto_free_ahash(priv->rx_tfm_mic); + crypto_free_shash(priv->rx_tfm_mic); } -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic) { - AHASH_REQUEST_ON_STACK(req, tfm_michael); - struct scatterlist sg[2]; + SHASH_DESC_ON_STACK(desc, tfm_michael); u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ int err; @@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, hdr[ETH_ALEN * 2 + 2] = 0; hdr[ETH_ALEN * 2 + 3] = 0; - /* Use scatter gather to MIC header and data in one go */ - sg_init_table(sg, 2); - sg_set_buf(&sg[0], hdr, sizeof(hdr)); - sg_set_buf(&sg[1], data, data_len); + desc->tfm = tfm_michael; + desc->flags = 0; - if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN)) - return -1; + err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN); + if (err) + return err; + + err = crypto_shash_init(desc); + if (err) + return err; + + err = crypto_shash_update(desc, hdr, sizeof(hdr)); + if (err) + return err; + + err = crypto_shash_update(desc, data, data_len); + if (err) + return err; + + err = crypto_shash_final(desc, mic); + shash_desc_zero(desc); - ahash_request_set_tfm(req, tfm_michael); - ahash_request_set_callback(req, 0, NULL, NULL); - ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr)); - err = crypto_ahash_digest(req); - ahash_request_zero(req); return err; } diff --git a/drivers/net/wireless/intersil/orinoco/mic.h 
b/drivers/net/wireless/intersil/orinoco/mic.h index ce731d05cc98..e8724e889219 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.h +++ b/drivers/net/wireless/intersil/orinoco/mic.h @@ -6,6 +6,7 @@ #define _ORINOCO_MIC_H_ #include <linux/types.h> +#include <crypto/hash.h> #define MICHAEL_MIC_LEN 8 @@ -15,7 +16,7 @@ struct crypto_ahash; int orinoco_mic_init(struct orinoco_private *priv); void orinoco_mic_free(struct orinoco_private *priv); -int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, +int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h index 2f0c84b1c440..5fa1c3e3713f 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco.h @@ -152,8 +152,8 @@ struct orinoco_private { u8 *wpa_ie; int wpa_ie_len; - struct crypto_ahash *rx_tfm_mic; - struct crypto_ahash *tx_tfm_mic; + struct crypto_shash *rx_tfm_mic; + struct crypto_shash *tx_tfm_mic; unsigned int wpa_enabled:1; unsigned int tkip_cm_active:1; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 0a508649903d..49015b05f3d1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf, return -ENOMEM; } rtlpriv = hw->priv; + rtlpriv->hw = hw; rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32), GFP_KERNEL); if (!rtlpriv->usb_data) diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 6307088b375f..a518cb1b59d4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) { resource_size_t allocated = 0, available = 0; struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_common *ndns = to_ndns(dev); struct nd_mapping *nd_mapping; struct nvdimm_drvdata *ndd; struct nd_label_id label_id; @@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) u8 *uuid = NULL; int rc, i; - if (dev->driver || to_ndns(dev)->claim) + if (dev->driver || ndns->claim) return -EBUSY; if (is_namespace_pmem(dev)) { @@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) nd_namespace_pmem_set_resource(nd_region, nspm, val * nd_region->ndr_mappings); - } else if (is_namespace_blk(dev)) { - struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); - - /* - * Try to delete the namespace if we deleted all of its - * allocation, this is not the seed device for the - * region, and it is not actively claimed by a btt - * instance. - */ - if (val == 0 && nd_region->ns_seed != dev - && !nsblk->common.claim) - nd_device_unregister(dev, ND_ASYNC); } + /* + * Try to delete the namespace if we deleted all of its + * allocation, this is not the seed device for the region, and + * it is not actively claimed by a btt instance. 
+ */ + if (val == 0 && nd_region->ns_seed != dev && !ndns->claim) + nd_device_unregister(dev, ND_ASYNC); + return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7282d7495bf1..5b536be5a12e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off, rc = memcpy_from_pmem(mem + off, pmem_addr, len); kunmap_atomic(mem); - return rc; + if (rc) + return -EIO; + return 0; } static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2fc86dc7a8df..8a3c3e32a704 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap) if (ret) return ret; - /* Checking for ctrl->tagset is a trick to avoid sleeping on module - * load, since we only need the quirk on reset_controller. Notice - * that the HGST device needs this delay only in firmware activation - * procedure; unfortunately we have no (easy) way to verify this. - */ - if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset) + if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) msleep(NVME_QUIRK_DELAY_AMOUNT); return nvme_wait_ready(ctrl, cap, false); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index aa0bc60810a7..fcc9dcfdf675 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1654,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, struct nvme_fc_fcp_op *op) { struct nvmefc_fcp_req *freq = &op->fcp_req; - u32 map_len = nvme_map_len(rq); enum dma_data_direction dir; int ret; freq->sg_cnt = 0; - if (!map_len) + if (!blk_rq_payload_bytes(rq)) return 0; freq->sg_table.sgl = freq->first_sgl; @@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret) return ret; - data_len = nvme_map_len(rq); + data_len = blk_rq_payload_bytes(rq); if (data_len) io_dir = ((rq_data_dir(rq) == WRITE) ? 
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 6377e14586dc..aead6d08ed2c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) return (sector >> (ns->lba_shift - 9)); } -static inline unsigned nvme_map_len(struct request *rq) -{ - if (req_op(rq) == REQ_OP_DISCARD) - return sizeof(struct nvme_dsm_range); - else - return blk_rq_bytes(rq); -} - static inline void nvme_cleanup_cmd(struct request *req) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 19beeb7b2ac2..3faefabf339c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req) return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); } -static int nvme_init_iod(struct request *rq, unsigned size, - struct nvme_dev *dev) +static int nvme_init_iod(struct request *rq, struct nvme_dev *dev) { struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); int nseg = blk_rq_nr_phys_segments(rq); + unsigned int size = blk_rq_payload_bytes(rq); if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); @@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif -static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, - int total_len) +static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; - int length = total_len; + int length = blk_rq_payload_bytes(req); struct scatterlist *sg = iod->sg; int dma_len = sg_dma_len(sg); u64 dma_addr = sg_dma_address(sg); @@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, } static int nvme_map_data(struct nvme_dev *dev, struct request *req, - unsigned size, struct nvme_command *cmnd) + struct nvme_command *cmnd) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct request_queue *q = req->q; @@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req, DMA_ATTR_NO_WARN)) goto out; - if (!nvme_setup_prps(dev, req, size)) + if (!nvme_setup_prps(dev, req)) goto out_unmap; ret = BLK_MQ_RQ_QUEUE_ERROR; @@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_dev *dev = nvmeq->dev; struct request *req = bd->rq; struct nvme_command cmnd; - unsigned map_len; int ret = BLK_MQ_RQ_QUEUE_OK; /* @@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ret != BLK_MQ_RQ_QUEUE_OK) return ret; - map_len = nvme_map_len(req); - ret = nvme_init_iod(req, map_len, dev); + ret = nvme_init_iod(req, dev); if (ret != BLK_MQ_RQ_QUEUE_OK) goto out_free_cmd; if (blk_rq_nr_phys_segments(req)) - ret = nvme_map_data(dev, req, map_len, &cmnd); + ret = nvme_map_data(dev, req, &cmnd); if (ret != BLK_MQ_RQ_QUEUE_OK) goto out_cleanup_iod; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f587af345889..557f29b1f1bb 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, } static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, - struct request *rq, unsigned int map_len, - struct nvme_command *c) + struct request *rq, struct nvme_command *c) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_device *dev = 
queue->device; @@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, } if (count == 1) { - if (rq_data_dir(rq) == WRITE && - map_len <= nvme_rdma_inline_data_size(queue) && - nvme_rdma_queue_idx(queue)) + if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && + blk_rq_payload_bytes(rq) <= + nvme_rdma_inline_data_size(queue)) return nvme_rdma_map_sg_inline(queue, req, c); if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) @@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq) { if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { - struct nvme_command *cmd = (struct nvme_command *)rq->cmd; + struct nvme_command *cmd = nvme_req(rq)->cmd; if (rq->cmd_type != REQ_TYPE_DRV_PRIV || cmd->common.opcode != nvme_fabrics_command || @@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_command *c = sqe->data; bool flush = false; struct ib_device *dev; - unsigned int map_len; int ret; WARN_ON_ONCE(rq->tag < 0); @@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(rq); - map_len = nvme_map_len(rq); - ret = nvme_rdma_map_data(queue, rq, map_len, c); + ret = nvme_rdma_map_data(queue, rq, c); if (ret < 0) { dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", ret); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index 1f38d0836751..f1b633bce525 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev) rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", xgene_msi_hwirq_alloc, NULL); - if (rc) + if (rc < 0) goto err_cpuhp; pci_xgene_online = rc; rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index bed19994c1e9..af8f6e92e885 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val; - /* get iATU unroll support */ - pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); - dev_dbg(pp->dev, "iATU unroll: %s\n", - pp->iatu_unroll_enabled ? "enabled" : "disabled"); - /* set the number of lanes */ val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); val &= ~PORT_LINK_MODE_MASK; @@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * we should not program the ATU here. */ if (!pp->ops->rd_other_conf) { + /* get iATU unroll support */ + pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp); + dev_dbg(pp->dev, "iATU unroll: %s\n", + pp->iatu_unroll_enabled ? "enabled" : "disabled"); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, pp->mem_base, pp->mem_bus_addr, pp->mem_size); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e164b5c9f0f0..204960e70333 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev) pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) return; + pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_flags_reg = reg16; @@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; /* - * A Root Port is always the upstream end of a Link. No PCIe - * component has two Links. 
Two Links are connected by a Switch - * that has a Port on each Link and internal logic to connect the - * two Ports. + * A Root Port or a PCI-to-PCIe bridge is always the upstream end + * of a Link. No PCIe component has two Links. Two Links are + * connected by a Switch that has a Port on each Link and internal + * logic to connect the two Ports. */ type = pci_pcie_type(pdev); - if (type == PCI_EXP_TYPE_ROOT_PORT) + if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_PCIE_BRIDGE) pdev->has_secondary_link = 1; else if (type == PCI_EXP_TYPE_UPSTREAM || type == PCI_EXP_TYPE_DOWNSTREAM) { diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 37300634b7d2..c123488266ce 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, enum pin_config_param param = pinconf_to_config_param(*config); void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); unsigned long flags; u32 conf, pull, val, debounce; u16 arg = 0; @@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset, return -EINVAL; raw_spin_lock_irqsave(&vg->lock, flags); - debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); + debounce = readl(db_reg); raw_spin_unlock_irqrestore(&vg->lock, flags); switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { @@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, unsigned int param, arg; void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG); unsigned long flags; u32 conf, val, debounce; int i, ret = 0; @@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, break; case PIN_CONFIG_INPUT_DEBOUNCE: - debounce = readl(byt_gpio_reg(vg, offset, - BYT_DEBOUNCE_REG)); - conf &= ~BYT_DEBOUNCE_PULSE_MASK; + debounce = readl(db_reg); + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; switch (arg) { + case 0: + conf &= BYT_DEBOUNCE_EN; + break; case 375: - conf |= BYT_DEBOUNCE_PULSE_375US; + debounce |= BYT_DEBOUNCE_PULSE_375US; break; case 750: - conf |= BYT_DEBOUNCE_PULSE_750US; + debounce |= BYT_DEBOUNCE_PULSE_750US; break; case 1500: - conf |= BYT_DEBOUNCE_PULSE_1500US; + debounce |= BYT_DEBOUNCE_PULSE_1500US; break; case 3000: - conf |= BYT_DEBOUNCE_PULSE_3MS; + debounce |= BYT_DEBOUNCE_PULSE_3MS; break; case 6000: - conf |= BYT_DEBOUNCE_PULSE_6MS; + debounce |= BYT_DEBOUNCE_PULSE_6MS; break; case 12000: - conf |= BYT_DEBOUNCE_PULSE_12MS; + debounce |= BYT_DEBOUNCE_PULSE_12MS; break; case 24000: - conf |= BYT_DEBOUNCE_PULSE_24MS; + debounce |= BYT_DEBOUNCE_PULSE_24MS; break; default: ret = -EINVAL; } + if (!ret) + writel(debounce, db_reg); break; default: ret = -ENOTSUPP; @@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc) static void byt_gpio_irq_init_hw(struct byt_gpio *vg) { + struct gpio_chip *gc = &vg->chip; + struct device *dev = &vg->pdev->dev; void __iomem *reg; u32 base, value; int i; @@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg) } value = readl(reg); - if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && - !(value & BYT_DIRECT_IRQ_EN)) { + if (value & BYT_DIRECT_IRQ_EN) { 
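/*
 * On the pinctrl-baytrail debounce hunk above: the old code read the
 * debounce register but OR'ed the pulse-width bits into "conf" (the
 * CONF0 value) and never wrote BYT_DEBOUNCE_REG back, so a requested
 * debounce time never reached the hardware.  The fix is a plain
 * read-modify-write of the correct register; illustrative sketch:
 */
debounce = readl(db_reg);               /* BYT_DEBOUNCE_REG, not CONF0 */
debounce &= ~BYT_DEBOUNCE_PULSE_MASK;   /* drop the old pulse selection */
debounce |= BYT_DEBOUNCE_PULSE_750US;   /* e.g. request a 750 us pulse */
writel(debounce, db_reg);               /* the write-back that was missing */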
+ clear_bit(i, gc->irq_valid_mask); + dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i); + } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) { byt_gpio_clear_triggering(vg, i); - dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); + dev_dbg(dev, "disabling GPIO %d\n", i); } } @@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg) gc->can_sleep = false; gc->parent = &vg->pdev->dev; gc->ngpio = vg->soc_data->npins; + gc->irq_need_valid_mask = true; #ifdef CONFIG_PM_SLEEP vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index 59cb7a6fc5be..901b356b09d7 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c @@ -19,7 +19,7 @@ #define BXT_PAD_OWN 0x020 #define BXT_HOSTSW_OWN 0x080 -#define BXT_PADCFGLOCK 0x090 +#define BXT_PADCFGLOCK 0x060 #define BXT_GPI_IE 0x110 #define BXT_COMMUNITY(s, e) \ diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 1e139672f1af..6df35dcb29ae 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, return 0; } +static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) +{ + u32 value; + + value = readl(padcfg0); + if (input) { + value &= ~PADCFG0_GPIORXDIS; + value |= PADCFG0_GPIOTXDIS; + } else { + value &= ~PADCFG0_GPIOTXDIS; + value |= PADCFG0_GPIORXDIS; + } + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - /* Disable TX buffer and enable RX (this will be input) */ - value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; writel(value, padcfg0); + /* Disable TX buffer and enable RX (this will be input) */ + __intel_gpio_set_direction(padcfg0, true); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - - value = readl(padcfg0); - if (input) - value |= PADCFG0_GPIOTXDIS; - else - value &= ~PADCFG0_GPIOTXDIS; - writel(value, padcfg0); + __intel_gpio_set_direction(padcfg0, input); raw_spin_unlock_irqrestore(&pctrl->lock, flags); diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index c3928aa3fefa..e0bca4df2a2f 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; -static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; -static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), - PIN(GPIOAO_5, 0) }; +static const unsigned int uart_tx_ao_b_pins[] 
= { PIN(GPIOAO_4, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; @@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = { GPIO_GROUP(GPIOAO_13, 0), /* bank AO */ - GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_tx_ao_b, 0, 24), GROUP(uart_rx_ao_b, 0, 25), GROUP(uart_tx_ao_a, 0, 12), GROUP(uart_rx_ao_a, 0, 11), diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 25694f7094c7..b69743b07a1d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) }; static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; -static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; -static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), - PIN(GPIOAO_5, 0) }; +static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) }; +static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) }; static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; @@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = { GPIO_GROUP(GPIOAO_9, 0), /* bank AO */ - GROUP(uart_tx_ao_b, 0, 26), + GROUP(uart_tx_ao_b, 0, 24), GROUP(uart_rx_ao_b, 0, 25), GROUP(uart_tx_ao_a, 0, 12), GROUP(uart_rx_ao_a, 0, 11), diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index c9a146948192..537b52055756 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc) i = 128; pin_num = AMD_GPIO_PINS_BANK2 + i; break; + default: + return; } for (; i < pin_num; i++) { diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c index aa8bd9794683..96686336e3a3 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c @@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 42, 45}; -static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; +static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}; static const unsigned i2c0_pins[] = {63, 64}; static const int i2c0_muxvals[] = {0, 0}; static const unsigned i2c1_pins[] = {65, 66}; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 410741acb3c9..f46ece2ce3c4 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) case 8: case 7: case 6: + case 1: ideapad_input_report(priv, vpc_bit); break; case 5: diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 1fc0de870ff8..361770568ad0 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -77,7 +77,7 @@ static int 
mfld_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, + error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 97b4c3a219c0..25f15df5c2d7 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -326,7 +326,7 @@ static int __init mlxplat_init(void) return 0; fail_platform_mux_register: - for (i--; i > 0 ; i--) + while (--i >= 0) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); fail_alloc: diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c index cbf4d83a7271..25b176996cb7 100644 --- a/drivers/platform/x86/surface3-wmi.c +++ b/drivers/platform/x86/surface3-wmi.c @@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle, static int s3_wmi_check_platform_device(struct device *dev, void *data) { - struct acpi_device *adev, *ts_adev; + struct acpi_device *adev, *ts_adev = NULL; acpi_handle handle; acpi_status status; @@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device) return 0; } -#ifdef CONFIG_PM -static int s3_wmi_resume(struct device *dev) +static int __maybe_unused s3_wmi_resume(struct device *dev) { s3_wmi_send_lid_state(); return 0; } -#endif static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); static struct platform_driver s3_wmi_driver = { diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9a507e77eced..90b05c72186c 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, goto unwind_vring_allocations; } - /* track the rvdevs list reference */ - kref_get(&rvdev->refcount); - list_add_tail(&rvdev->node, &rproc->rvdevs); rproc_add_subdev(rproc, &rvdev->subdev, @@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) /* * Create a copy of the resource table. When a virtio device starts * and calls vring_new_virtqueue() the address of the allocated vring - * will be stored in the table_ptr. Before the device is started, - * table_ptr will be copied into device memory. + * will be stored in the cached_table. Before the device is started, + * cached_table will be copied into device memory. */ - rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL); - if (!rproc->table_ptr) + rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL); + if (!rproc->cached_table) goto clean_up; + rproc->table_ptr = rproc->cached_table; + /* reset max_notifyid */ rproc->max_notifyid = -1; @@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) } /* - * The starting device has been given the rproc->table_ptr as the + * The starting device has been given the rproc->cached_table as the * resource table. The address of the vring along with the other - * allocated resources (carveouts etc) is stored in table_ptr. + * allocated resources (carveouts etc) is stored in cached_table. * In order to pass this information to the remote device we must copy * this information to device memory. We also update the table_ptr so * that any subsequent changes will be applied to the loaded version. 
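/*
 * On the intel_mid_powerbtn hunk above: request_threaded_irq() with a
 * NULL primary handler makes the core install its default primary
 * handler, and genirq rejects that combination with -EINVAL unless
 * IRQF_ONESHOT is set, because the line must stay masked until the
 * thread handler finishes or a level-triggered interrupt would fire
 * endlessly.  Generic shape (names are placeholders):
 */
ret = request_threaded_irq(irq, NULL, my_thread_fn,
                           IRQF_ONESHOT, "my-driver", my_data);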
*/ loaded_table = rproc_find_loaded_rsc_table(rproc, fw); - if (loaded_table) - memcpy(loaded_table, rproc->table_ptr, tablesz); + if (loaded_table) { + memcpy(loaded_table, rproc->cached_table, tablesz); + rproc->table_ptr = loaded_table; + } /* power up the remote processor */ ret = rproc->ops->start(rproc); @@ -951,7 +952,8 @@ stop_rproc: clean_up_resources: rproc_resource_cleanup(rproc); clean_up: - kfree(rproc->table_ptr); + kfree(rproc->cached_table); + rproc->cached_table = NULL; rproc->table_ptr = NULL; rproc_disable_iommu(rproc); @@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc) rproc_disable_iommu(rproc); /* Free the copy of the resource table */ - kfree(rproc->table_ptr); + kfree(rproc->cached_table); + rproc->cached_table = NULL; rproc->table_ptr = NULL; /* if in crash state, unlock crash handler */ diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index a79cb5a9e5f2..1cfb775e8e82 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev) struct device *dev = &rpdev->dev; int ret; - dev_set_name(&rpdev->dev, "%s:%s", - dev_name(dev->parent), rpdev->id.name); + dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent), + rpdev->id.name, rpdev->src, rpdev->dst); rpdev->dev.bus = &rpmsg_bus; rpdev->dev.release = rpmsg_release_device; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 639ed4e6afd1..070c4da95f48 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define CCW_CMD_WRITE_CONF 0x21 #define CCW_CMD_WRITE_STATUS 0x31 #define CCW_CMD_READ_VQ_CONF 0x32 +#define CCW_CMD_READ_STATUS 0x72 #define CCW_CMD_SET_IND_ADAPTER 0x73 #define CCW_CMD_SET_VIRTIO_REV 0x83 @@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 +#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) @@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) * This may happen on device detach. */ if (ret && (ret != -ENODEV)) - dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", + dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n", ret, index); vring_del_virtqueue(vq); @@ -892,6 +894,28 @@ out_free: static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); + u8 old_status = *vcdev->status; + struct ccw1 *ccw; + + if (vcdev->revision < 1) + return *vcdev->status; + + ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); + if (!ccw) + return old_status; + + ccw->cmd_code = CCW_CMD_READ_STATUS; + ccw->flags = 0; + ccw->count = sizeof(*vcdev->status); + ccw->cda = (__u32)(unsigned long)vcdev->status; + ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS); +/* + * If the channel program failed (should only happen if the device + * was hotunplugged, and then we clean up via the machine check + * handler anyway), vcdev->status was not overwritten and we just + * return the old status, which is fine. 
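/*
 * On the rpmsg_register_device() hunk above, a worked example with
 * made-up endpoint addresses: a channel that used to be registered as
 * "virtio0:rpmsg-client-sample" now becomes
 * "virtio0.rpmsg-client-sample.1024.53" (parent.name.src.dst), so two
 * channels sharing a name but using different endpoints no longer
 * collide on the bus.
 */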
+*/ + kfree(ccw); return *vcdev->status; } @@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) kfree(ccw); } -static struct virtio_config_ops virtio_ccw_config_ops = { +static const struct virtio_config_ops virtio_ccw_config_ops = { .get_features = virtio_ccw_get_features, .finalize_features = virtio_ccw_finalize_features, .get = virtio_ccw_get_config, @@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev, case VIRTIO_CCW_DOING_READ_CONFIG: case VIRTIO_CCW_DOING_WRITE_CONFIG: case VIRTIO_CCW_DOING_WRITE_STATUS: + case VIRTIO_CCW_DOING_READ_STATUS: case VIRTIO_CCW_DOING_SET_VQ: case VIRTIO_CCW_DOING_SET_IND: case VIRTIO_CCW_DOING_SET_CONF_IND: diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index d9e15210b110..5caf5f3ff642 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin" -#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin" -#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" +#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index a9a00169ad91..b2e8c0dfc79c 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job) struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; - struct fc_bsg_request *bsg_request = bsg_request; + struct fc_bsg_request *bsg_request = job->request; struct fc_bsg_reply *bsg_reply = job->reply; uint32_t command_type = bsg_request->msgcode; unsigned long flags; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index f9e862093a25..cfcfff48e8e1 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -58,7 +58,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.2.25.0" +#define BFAD_DRIVER_VERSION "3.2.25.1" #endif #define BFAD_PROTO_NAME FCPI_NAME diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 9ddc9200e0a4..9e4b7709043e 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -248,6 +248,7 @@ struct fnic { struct completion *remove_wait; /* device remove thread blocks */ atomic_t in_flight; /* io counter */ + bool internal_reset_inprogress; u32 _reserved; /* fill hole */ unsigned long state_flags; /* protected by host lock */ enum fnic_state state; diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 2544a37ece0a..adb3d5871e74 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc) unsigned long wait_host_tmo; struct Scsi_Host *shost = sc->device->host; struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->internal_reset_inprogress == 0) { + fnic->internal_reset_inprogress = 1; + } else { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "host reset in progress skipping another host 
reset\n"); + return SUCCESS; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); /* * If fnic_reset is successful, wait for fabric login to complete @@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc) } } + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->internal_reset_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return ret; } diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 3d3768aaab4f..99b747cedbeb 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -46,6 +46,7 @@ #define INITIAL_SRP_LIMIT 800 #define DEFAULT_MAX_SECTORS 256 +#define MAX_TXU 1024 * 1024 static uint max_vdma_size = MAX_H_COPY_RDMA; @@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, } info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, - GFP_KERNEL); + GFP_ATOMIC); if (!info) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); @@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, info->mad_version = cpu_to_be32(MAD_VERSION_1); info->os_type = cpu_to_be32(LINUX); memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); - info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); + info->port_max_txu[0] = cpu_to_be32(MAX_TXU); dma_wmb(); rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, @@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) } cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, - GFP_KERNEL); + GFP_ATOMIC); if (!cap) { dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", iue->target); @@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) 1, 1); if (rc) { pr_err("srp_transfer_data() failed: %d\n", rc); - return -EAGAIN; + return -EIO; } /* * We now tell TCM to add this WRITE CDB directly into the TCM storage diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 236e4e51d161..7b6bd8ed0d0b 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4faa7672fc1d..a78a3df68f67 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 394fe1338d09..dcb33f4fa687 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ 
b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -393,6 +393,7 @@ struct MPT3SAS_TARGET { * @eedp_enable: eedp support enable bit * @eedp_type: 0(type_1), 1(type_2), 2(type_3) * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device */ struct MPT3SAS_DEVICE { struct MPT3SAS_TARGET *sas_target; @@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE { u8 ignore_delay_remove; /* Iopriority Command Handling */ u8 ncq_prio_enable; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b5c966e319d3..75f3fce1c867 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } -static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) { - return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; } /** @@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, - SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); - /* - * Lock the device for any subsequent command until command is - * done. - */ - if (ata_12_16_cmd(scmd)) - scsi_internal_device_block(scmd->device); - sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; - if (ata_12_16_cmd(scmd)) - scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + _scsih_set_satl_pending(scmd, false); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig index 23ca8a274586..21331453db7b 100644 --- a/drivers/scsi/qedi/Kconfig +++ b/drivers/scsi/qedi/Kconfig @@ -1,6 +1,6 @@ config QEDI tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support" - depends on PCI && SCSI + depends on PCI && SCSI && UIO depends on QED select SCSI_ISCSI_ATTRS select QED_LL2 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 47eb4d545d13..f201f4099620 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; + mutex_lock(&ha->optrom_mutex); + if (ha->optrom_state != QLA_SREADING) - return 0; + goto out; - mutex_lock(&ha->optrom_mutex); rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); + +out: mutex_unlock(&ha->optrom_mutex); return rval; @@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - if (ha->optrom_state != QLA_SWRITING) + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); return -EINVAL; - if (off > ha->optrom_region_size) + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); return -ERANGE; + } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; - mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); @@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); int type; - int rval = 0; port_id_t did; type = simple_strtol(buf, NULL, 10); @@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); - rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); + qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); return count; } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index f7df01b76714..5b1287a63c49 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1556,7 +1556,8 @@ typedef struct { struct atio { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
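/*
 * On the mpt3sas queuecommand hunk above, the lockless gate in
 * condensed form: another CPU may set the bit between the test_bit()
 * check and the test_and_set_bit() claim, so the claim is retried in a
 * loop instead of taking a lock on the hot I/O path:
 */
do {
        if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
                scmd->result = SAM_STAT_BUSY; /* SATL command owns the device */
                scmd->scsi_done(scmd);
                return 0;
        }
        /* _scsih_set_satl_pending(scmd, true) returns the previous bit
         * value; non-zero means we lost the race and must re-check. */
} while (_scsih_set_satl_pending(scmd, true));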
*/ - uint8_t data[58]; + __le16 attr_n_length; + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ }; @@ -2732,7 +2733,7 @@ struct isp_operations { #define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) #define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) -#define QLA_MSIX_DEFAULT 0x00 +#define QLA_BASE_VECTORS 2 /* default + RSP */ #define QLA_MSIX_RSP_Q 0x01 #define QLA_ATIO_VECTOR 0x02 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 @@ -2754,7 +2755,6 @@ struct qla_msix_entry { uint16_t entry; char name[30]; void *handle; - struct irq_affinity_notify irq_notify; int cpuid; }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 632d5f30386a..7b6317c8c2e9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) /* Wait for soft-reset to complete. */ RD_REG_DWORD(&reg->ctrl_status); - for (cnt = 0; cnt < 6000000; cnt++) { + for (cnt = 0; cnt < 60; cnt++) { barrier(); if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) @@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) RD_REG_DWORD(&reg->hccr); RD_REG_WORD(&reg->mailbox0); - for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && + for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5093ca9b02ec..dc88a09f9043 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static void qla_irq_affinity_notify(struct irq_affinity_notify *, - const cpumask_t *); -static void qla_irq_affinity_release(struct kref *); - /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); @@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (!vha->flags.online) return; - if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) { - /* if kernel does not notify qla of IRQ's CPU change, - * then set it here.
- */ - rsp->msix->cpuid = smp_processor_id(); - ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid; - } - while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; @@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = { static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { -#define MIN_MSIX_COUNT 2 int i, ret; struct qla_msix_entry *qentry; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct irq_affinity desc = { + .pre_vectors = QLA_BASE_VECTORS, + }; + + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) + desc.pre_vectors++; + + ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS, + ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + &desc); - ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count, - PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (ret < 0) { ql_log(ql_log_fatal, vha, 0x00c7, "MSI-X: Failed to enable support, " @@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->have_irq = 0; qentry->in_use = 0; qentry->handle = NULL; - qentry->irq_notify.notify = qla_irq_affinity_notify; - qentry->irq_notify.release = qla_irq_affinity_release; - qentry->cpuid = -1; } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { + for (i = 0; i < QLA_BASE_VECTORS; i++) { qentry = &ha->msix_entries[i]; qentry->handle = rsp; rsp->msix = qentry; @@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) goto msix_register_fail; qentry->have_irq = 1; qentry->in_use = 1; - - /* Register for CPU affinity notification. */ - irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); - - /* Schedule work (ie. trigger a notification) to read cpu - * mask for this specific irq. - * kref_get is required because - * irq_affinity_notify() will do - * kref_put(). 
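/*
 * On the qla24xx_enable_msix() hunk above: PCI_IRQ_AFFINITY makes the
 * core spread the allocated vectors across CPUs, and .pre_vectors
 * exempts the leading vectors (default + response queue, plus the ATIO
 * vector in target mode) from that spreading so the driver keeps
 * control of their affinity.  API shape with hypothetical counts:
 */
struct irq_affinity desc = { .pre_vectors = 2 };
int nvec = pci_alloc_irq_vectors_affinity(pdev, 2, max_vectors,
                                           PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                           &desc);
if (nvec < 0)
        return nvec;    /* could not allocate even the minimum */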
- */ - kref_get(&qentry->irq_notify.kref); - schedule_work(&qentry->irq_notify.work); } /* @@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, msix->handle = qpair; return ret; } - - -/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */ -static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct qla_hw_data *ha; - struct scsi_qla_host *base_vha; - struct rsp_que *rsp = e->handle; - - /* user is recommended to set mask to just 1 cpu */ - e->cpuid = cpumask_first(mask); - - ha = rsp->hw; - base_vha = pci_get_drvdata(ha->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host %ld : vector %d cpu %d \n", __func__, - base_vha->host_no, e->vector, e->cpuid); - - if (e->have_irq) { - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && - (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) { - ha->tgt.rspq_vector_cpuid = e->cpuid; - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: rspq vector %d cpu %d runtime change\n", - __func__, base_vha->host_no, e->vector, e->cpuid); - } - } -} - -static void qla_irq_affinity_release(struct kref *ref) -{ - struct irq_affinity_notify *notify = - container_of(ref, struct irq_affinity_notify, kref); - struct qla_msix_entry *e = - container_of(notify, struct qla_msix_entry, irq_notify); - struct rsp_que *rsp = e->handle; - struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev); - - ql_dbg(ql_dbg_init, base_vha, 0xffff, - "%s: host%ld: vector %d cpu %d\n", __func__, - base_vha->host_no, e->vector, e->cpuid); -} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2819ceb96041..67f64db390b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -10,7 +10,7 @@ #include <linux/delay.h> #include <linux/gfp.h> -struct rom_cmd { +static struct rom_cmd { uint16_t cmd; } rom_cmds[] = { { MBC_LOAD_RAM }, @@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - /* if PCI error, then avoid mbx processing.*/ - if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { + /* if PCI error, then avoid mbx processing.*/ + if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { ql_log(ql_log_warn, vha, 0x1191, "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; - } + } reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } } else { - uint16_t mb0; - uint32_t ictrl; + uint16_t mb[8]; + uint32_t ictrl, host_status, hccr; uint16_t w; if (IS_FWI2_CAPABLE(ha)) { - mb0 = RD_REG_WORD(&reg->isp24.mailbox0); + mb[0] = RD_REG_WORD(&reg->isp24.mailbox0); + mb[1] = RD_REG_WORD(&reg->isp24.mailbox1); + mb[2] = RD_REG_WORD(&reg->isp24.mailbox2); + mb[3] = RD_REG_WORD(&reg->isp24.mailbox3); + mb[7] = RD_REG_WORD(&reg->isp24.mailbox7); ictrl = RD_REG_DWORD(&reg->isp24.ictrl); + host_status = RD_REG_DWORD(&reg->isp24.host_status); + hccr = RD_REG_DWORD(&reg->isp24.hccr); + + ql_log(ql_log_warn, vha, 0x1119, + "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " + "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", + command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], + mb[7], host_status, hccr); + } else { - mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); + mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0); ictrl = RD_REG_WORD(&reg->isp.ictrl); + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, + "MBX Command timeout for cmd %x, 
iocontrol=%x jiffies=%lx " + "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); } - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, - "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " - "mb[0]=0x%x\n", command, ictrl, jiffies, mb0); ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); /* Capture FW dump only, if PCI device active */ @@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - int configured_count; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, "Entered %s.\n", __func__); @@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr) /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); } else { - configured_count = mcp->mb[11]; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, "Done %s.\n", __func__); } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 54380b434b30..0a1723cc08cf 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized; (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) +const int MD_MIU_TEST_AGT_RDDATA[] = { + 0x410000A8, 0x410000AC, + 0x410000B8, 0x410000BC +}; + static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 6201dce3553b..77624eac95a4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue { #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 -static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, - 0x410000B8, 0x410000BC }; +extern const int MD_MIU_TEST_AGT_RDDATA[4]; #define CRB_NIU_XG_PAUSE_CTL_P0 0x1 #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 007192d7bad8..dc1ec9b61027 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -15,6 +15,23 @@ #define TIMEOUT_100_MS 100 +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + /* 8044 Flash Read/Write functions */ uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 02fe3c4cdf55..83c1b7e17c80 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -535,23 +535,6 @@ enum qla_regs { #define CRB_CMDPEG_CHECK_RETRY_COUNT 60 #define CRB_CMDPEG_CHECK_DELAY 500 -static const uint32_t qla8044_reg_tbl[] = { - QLA8044_PEG_HALT_STATUS1, - QLA8044_PEG_HALT_STATUS2, - QLA8044_PEG_ALIVE_COUNTER, - QLA8044_CRB_DRV_ACTIVE, - QLA8044_CRB_DEV_STATE, - QLA8044_CRB_DRV_STATE, - QLA8044_CRB_DRV_SCRATCH, - QLA8044_CRB_DEV_PART_INFO1, - QLA8044_CRB_IDC_VER_MAJOR, - QLA8044_FW_VER_MAJOR, - QLA8044_FW_VER_MINOR, - QLA8044_FW_VER_SUB, - QLA8044_CMDPEG_STATE, - QLA8044_ASIC_TEMP, -}; - /* MiniDump Structures */ /* Driver_code is for driver to write some info about the entry diff --git a/drivers/scsi/qla2xxx/qla_os.c 
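/*
 * On the qla_nx/qla_nx2 hunks above: a "static const" array defined in
 * a header is instantiated separately in every translation unit that
 * includes it, wasting space and provoking unused-variable warnings.
 * The usual cure, as applied here, is one definition in a .c file plus
 * an extern declaration in the header:
 */
/* header */  extern const int MD_MIU_TEST_AGT_RDDATA[4];
/* one .c */  const int MD_MIU_TEST_AGT_RDDATA[] = {
        0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};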
b/drivers/scsi/qla2xxx/qla_os.c index 8521cfe302e9..0a000ecf0881 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) continue; rsp = ha->rsp_q_map[cnt]; - clear_bit(cnt, ha->req_qid_map); + clear_bit(cnt, ha->rsp_qid_map); ha->rsp_q_map[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_free_rsp_que(ha, rsp); @@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, sizeof(struct ct6_dsd), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ctx_cachep) - goto fail_free_gid_list; + goto fail_free_srb_mempool; } ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, ctx_cachep); @@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), GFP_KERNEL); if (!ha->loop_id_map) - goto fail_async_pd; + goto fail_loop_id_map; else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, @@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, return 0; +fail_loop_id_map: + dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); fail_ex_init_cb: @@ -3851,6 +3853,10 @@ fail_free_ms_iocb: dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); ha->ms_iocb = NULL; ha->ms_iocb_dma = 0; + + if (ha->sns_cmd) + dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), + ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: if (IS_QLA82XX(ha) || ql2xenabledif) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); @@ -3868,10 +3874,12 @@ fail_free_nvram: kfree(ha->nvram); ha->nvram = NULL; fail_free_ctx_mempool: - mempool_destroy(ha->ctx_mempool); + if (ha->ctx_mempool) + mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; fail_free_srb_mempool: - mempool_destroy(ha->srb_mempool); + if (ha->srb_mempool) + mempool_destroy(ha->srb_mempool); ha->srb_mempool = NULL; fail_free_gid_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bff9689f5ca9..e4fda84b959e 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) { struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; - uint32_t unpacked_lun, lun = 0; uint16_t loop_id; int res = 0; struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; - struct atio_from_isp *a = (struct atio_from_isp *)iocb; unsigned long flags; loop_id = le16_to_cpu(n->u.isp24.nport_handle); @@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); - lun = a->u.isp24.fcp_cmnd.lun; - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); - - return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, - iocb, QLA24XX_MGMT_SEND_NACK); + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); } /* ha->tgt.sess_lock supposed to be held on entry */ @@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = 
ntfy->ox_id; @@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, #if 0 /* Todo */ if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#else + if (rc) { + } #endif goto done; } @@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) if (!vha->flags.online) return; - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, - ha_locked); + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xffff, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; @@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) /* Disable Full Login after LIP */ nv->host_p &= cpu_to_le32(~BIT_10); + + /* + * clear BIT 15 explicitly as we have seen at least + * a couple of instances where this was set and this + * was causing the firmware to not be initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); /* Enable target PRLI control */ nv->firmware_options_2 |= cpu_to_le32(BIT_14); } else { @@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = @@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) /* Disable ini mode, if requested */ if (!qla_ini_mode_enabled(vha)) nv->firmware_options_1 |= cpu_to_le32(BIT_5); - /* Disable Full Login after LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_13); /* Enable initial LIP */ nv->firmware_options_1 &= cpu_to_le32(~BIT_9); + /* + * clear BIT 15 explicitly as we have seen at + * least a couple of instances where this was set + * and this was causing the firmware to not be + * initialized. + */ + nv->firmware_options_1 &= cpu_to_le32(~BIT_15); if (ql2xtgt_tape_enable) /* Enable FC tape support */ nv->firmware_options_2 |= cpu_to_le32(BIT_12); @@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) return; } - /* out-of-order frames reassembly */ - nv->firmware_options_3 |= BIT_6|BIT_9; - if (ha->tgt.enable_class_2) { if (vha->flags.init_done) fc_host_supported_classes(vha->host) = diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f60eedd..0824a8164a24 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -427,13 +427,33 @@ struct atio_from_isp { struct { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; } __packed; +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 36935c9ed669..8a58ef3adab4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring; + + if (atr || !buf) { + length = ha->tgt.atio_q_length; + qla27xx_insert16(0, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd026, "%s: unknown queue %x\n", __func__, ent->t263.queue_type); @@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, count++; } } + } else if (QLA_TGT_MODE_ENABLED() && + ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + struct qla_hw_data *ha = vha->hw; + struct atio *atr = ha->tgt.atio_ring_ptr; + + if (atr || !buf) { + qla27xx_insert16(0, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(ha->tgt.atio_q_in ? 
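/*
 * On the fcpcmd_is_corrupted()/adjust_corrupted_atio() helpers above,
 * with a made-up value: attr_n_length keeps the frame length in its low
 * 12 bits (FCP_CMD_LENGTH_MASK), and a type-7 ATIO whose masked length
 * is, say, 0x20 cannot hold a complete FCP_CMND IU (minimum 0x38), so
 * the entry is terminated rather than dispatched, and the length is
 * rewritten to the minimum so the ring scan does not trip over the same
 * entry again.
 */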
+ readl(ha->tgt.atio_q_in) : 0, buf, len); + count++; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, "%s: unknown queue %x\n", __func__, ent->t274.queue_type); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 6643f6fc7795..d925910be761 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, { return sprintf(page, "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); } @@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void) int ret; pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " - UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, + UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname, utsname()->machine); ret = target_register_template(&tcm_qla2xxx_ops); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 37e026a4823d..cf8430be183b 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -1,7 +1,6 @@ #include <target/target_core_base.h> #include <linux/btree.h> -#define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 /* diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index c35b6de4ca64..e9e1e141af9c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) count = blk_rq_map_sg(req->q, req, sdb->table.sgl); BUG_ON(count > sdb->table.nents); sdb->table.nents = count; - sdb->length = blk_rq_bytes(req); + sdb->length = blk_rq_payload_bytes(req); return BLKPREP_OK; } @@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev) * request queue. */ if (q->mq_ops) { - blk_mq_stop_hw_queues(q); + blk_mq_quiesce_queue(q); } else { spin_lock_irqsave(q->queue_lock, flags); blk_stop_queue(q); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b1933041da39..0b09638fa39b 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -836,7 +836,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) struct bio *bio = rq->bio; sector_t sector = blk_rq_pos(rq); unsigned int nr_sectors = blk_rq_sectors(rq); - unsigned int nr_bytes = blk_rq_bytes(rq); int ret; if (sdkp->device->no_write_same) @@ -869,21 +868,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) cmd->transfersize = sdp->sector_size; cmd->allowed = SD_MAX_RETRIES; - - /* - * For WRITE_SAME the data transferred in the DATA IN buffer is - * different from the amount of data actually written to the target. - * - * We set up __data_len to the amount of data transferred from the - * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list - * to transfer a single sector of data first, but then reset it to - * the amount of data to be written right after so that the I/O path - * knows how much to actually write. 
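/*
 * On the sd_setup_write_same_cmnd() hunk whose removed comment ends
 * above: the temporary rq->__data_len override is no longer needed
 * because scsi_init_sgtable() (see the scsi_lib.c hunk earlier) now
 * sizes the S/G list from blk_rq_payload_bytes(), which for WRITE SAME
 * is the single-sector payload rather than the whole range written.
 */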
- */ - rq->__data_len = sdp->sector_size; - ret = scsi_init_io(cmd); - rq->__data_len = nr_bytes; - return ret; + return scsi_init_io(cmd); } static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) @@ -2600,7 +2585,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; @@ -2783,13 +2769,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); } - sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) - q->limits.zoned = BLK_ZONED_HA; - else if (sdkp->device->type == TYPE_ZBC) + if (sdkp->device->type == TYPE_ZBC) { + /* Host-managed */ q->limits.zoned = BLK_ZONED_HM; - else - q->limits.zoned = BLK_ZONED_NONE; + } else { + sdkp->zoned = (buffer[8] >> 4) & 3; + if (sdkp->zoned == 1) + /* Host-aware */ + q->limits.zoned = BLK_ZONED_HA; + else + /* + * Treat drive-managed devices as + * regular block devices. + */ + q->limits.zoned = BLK_ZONED_NONE; + } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 8c9a35c91705..50adabbb5808 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - if (scsi_is_sas_rphy(&sdev->sdev_gendev)) + if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent)) efd.addr = sas_get_address(sdev); if (efd.addr) { diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index 396b32dca074..7cf70aaec0ba 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); + ret = -ENOMEM; goto err_free_res; } @@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); + ret = -ENOMEM; goto err_free_dflt_sgl_pool; } @@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pool) { SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); + ret = -ENOMEM; goto err_free_max_sgl_pool; } diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 8823cc81ae45..5bb376009d98 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) if (IS_ERR(task)) { dev_err(dev, "can't create rproc_boot thread\n"); + ret = PTR_ERR(task); goto err_put_rproc; } diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ec4aa252d6e8..2922a9908302 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -378,6 +378,7 @@ config SPI_FSL_SPI config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO + depends on HAS_DMA depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST help This enables support for the Freescale DSPI controller in master diff --git a/drivers/spi/spi-armada-3700.c 
b/drivers/spi/spi-armada-3700.c index e89da0af45d2..0314c6b9e044 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev) struct spi_master *master; struct a3700_spi *spi; u32 num_cs = 0; - int ret = 0; + int irq, ret = 0; master = spi_alloc_master(dev, sizeof(*spi)); if (!master) { @@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev) master->unprepare_message = a3700_spi_unprepare_message; master->set_cs = a3700_spi_set_cs; master->flags = SPI_MASTER_HALF_DUPLEX; - master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | + master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD); platform_set_drvdata(pdev, master); @@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev) goto error; } - spi->irq = platform_get_irq(pdev, 0); - if (spi->irq < 0) { - dev_err(dev, "could not get irq: %d\n", spi->irq); + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "could not get irq: %d\n", irq); ret = -ENXIO; goto error; } + spi->irq = irq; init_completion(&spi->done); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 319225d7e761..6ab4c7700228 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev) SPI_ENGINE_VERSION_MAJOR(version), SPI_ENGINE_VERSION_MINOR(version), SPI_ENGINE_VERSION_PATCH(version)); - return -ENODEV; + ret = -ENODEV; + goto err_put_master; } spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index d36c11b73a35..02fb96797ac8 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index e31971f91475..837cb8d0bac6 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer) static void mid_spi_dma_stop(struct dw_spi *dws) { if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->txchan); + dmaengine_terminate_sync(dws->txchan); clear_bit(TX_BUSY, &dws->dma_chan_busy); } if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { - dmaengine_terminate_all(dws->rxchan); + dmaengine_terminate_sync(dws->rxchan); clear_bit(RX_BUSY, &dws->dma_chan_busy); } } diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index b715a26a9148..054012f87567 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = { static int dw_spi_debugfs_init(struct dw_spi *dws) { - dws->debugfs = debugfs_create_dir("dw_spi", NULL); + char name[128]; + + snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev)); + dws->debugfs = debugfs_create_dir(name, NULL); if (!dws->debugfs) return
-ENOMEM; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index dd7b5b47291d..d6239fa718be 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); pxa2xx_spi_write(drv_data, SSCR0, tmp); + break; default: tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0012ad02e569..1f00eeb0b5a3 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = { }; static const struct of_device_id sh_msiof_match[] = { - { .compatible = "renesas,sh-msiof", .data = &sh_data }, { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data }, { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, + { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data }, + { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */ {}, }; MODULE_DEVICE_TABLE(of, sh_msiof_match); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7dfefd66df93..1cadc9eefa21 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: + case TCM_TOO_MANY_TARGET_DESCS: + case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: + case TCM_TOO_MANY_SEGMENT_DESCS: + case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = { .key = ILLEGAL_REQUEST, .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ }, + [TCM_TOO_MANY_TARGET_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ + }, + [TCM_TOO_MANY_SEGMENT_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ + }, [TCM_PARAMETER_LIST_LENGTH_ERROR] = { .key = ILLEGAL_REQUEST, .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 37d5caebffa6..d828b3b5000b 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, - bool src) +static int 
target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, + struct se_device **found_dev) { struct se_device *se_dev; - unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; int rc; - if (src) - dev_wwn = &xop->dst_tid_wwn[0]; - else - dev_wwn = &xop->src_tid_wwn[0]; - mutex_lock(&g_device_mutex); list_for_each_entry(se_dev, &g_device_list, g_dev_node) { @@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op if (rc != 0) continue; - if (src) { - xop->dst_dev = se_dev; - pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" - " se_dev\n", xop->dst_dev); - } else { - xop->src_dev = se_dev; - pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" - " se_dev\n", xop->src_dev); - } + *found_dev = se_dev; + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); rc = target_depend_item(&se_dev->dev_group.cg_item); if (rc != 0) { @@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, - unsigned char *p, bool src) + unsigned char *p, unsigned short cscd_index) { unsigned char *desc = p; unsigned short ript; @@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op return -EINVAL; } - if (src) { + if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { + pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " + "dest\n", cscd_index); + return 0; + } + + if (cscd_index == xop->stdi) { memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* * Determine if the source designator matches the local device @@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" " received xop\n", xop->src_dev); } - } else { + } + + if (cscd_index == xop->dtdi) { memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* - * Determine if the destination designator matches the local device + * Determine if the destination designator matches the local + * device. If @cscd_index corresponds to both source (stdi) and + * destination (dtdi), or dtdi comes after stdi, then + * XCOL_DEST_RECV_OP wins. */ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], XCOPY_NAA_IEEE_REGEX_LEN)) { @@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, { struct se_device *local_dev = se_cmd->se_dev; unsigned char *desc = p; - int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; + unsigned short cscd_index = 0; unsigned short start = 0; - bool src = true; *sense_ret = TCM_INVALID_PARAMETER_LIST; if (offset != 0) { pr_err("XCOPY target descriptor list length is not" " multiple of %d\n", XCOPY_TARGET_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; return -EINVAL; } - if (tdll > 64) { + if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) { pr_err("XCOPY target descriptor supports a maximum" " two src/dest descriptors, tdll: %hu too large..\n", tdll); + /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_TARGET_DESCS; return -EINVAL; } /* @@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, while (start < tdll) { /* - * Check target descriptor identification with 0xE4 type with - * use VPD 0x83 WWPN matching .. 
+ * Check target descriptor identification with 0xE4 type, and + * compare the current index with the CSCD descriptor IDs in + * the segment descriptor. Use VPD 0x83 WWPN matching .. */ switch (desc[0]) { case 0xe4: rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, - &desc[0], src); + &desc[0], cscd_index); if (rc != 0) goto out; - /* - * Assume target descriptors are in source -> destination order.. - */ - if (src) - src = false; - else - src = true; start += XCOPY_TARGET_DESC_LEN; desc += XCOPY_TARGET_DESC_LEN; - ret++; + cscd_index++; break; default: pr_err("XCOPY unsupported descriptor type code:" " 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE; goto out; } } - if (xop->op_origin == XCOL_SOURCE_RECV_OP) - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); - else - rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); + switch (xop->op_origin) { + case XCOL_SOURCE_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn, + &xop->dst_dev); + break; + case XCOL_DEST_RECV_OP: + rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn, + &xop->src_dev); + break; + default: + pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " + "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); + rc = -EINVAL; + break; + } /* * If a matching IEEE NAA 0x83 descriptor for the requested device * is not located on this node, return COPY_ABORTED with ASQ/ASQC @@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", xop->dst_dev, &xop->dst_tid_wwn[0]); - return ret; + return cscd_index; out: return -EINVAL; @@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op xop->stdi = get_unaligned_be16(&desc[4]); xop->dtdi = get_unaligned_be16(&desc[6]); + + if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX || + xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) { + pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n", + XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi); + return -EINVAL; + } + pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", desc_len, xop->stdi, xop->dtdi, dc); @@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, struct xcopy_op *xop, unsigned char *p, - unsigned int sdll) + unsigned int sdll, sense_reason_t *sense_ret) { unsigned char *desc = p; unsigned int start = 0; int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; + *sense_ret = TCM_INVALID_PARAMETER_LIST; + if (offset != 0) { pr_err("XCOPY segment descriptor list length is not" " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; + return -EINVAL; + } + if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { + pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too" + " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); + /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ + *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; return -EINVAL; } @@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, default: pr_err("XCOPY unsupported segment descriptor" "type: 0x%02x\n", desc[0]); + *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE; goto out; } } @@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) return TCM_UNSUPPORTED_SCSI_OPCODE; } + if (se_cmd->data_length == 0) { + 
target_complete_cmd(se_cmd, SAM_STAT_GOOD); + return TCM_NO_SENSE; + } + if (se_cmd->data_length < XCOPY_HDR_LEN) { + pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", + se_cmd->data_length, XCOPY_HDR_LEN); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); if (!xop) { pr_err("Unable to allocate xcopy_op\n"); @@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) */ tdll = get_unaligned_be16(&p[2]); sdll = get_unaligned_be32(&p[8]); + if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) { + pr_err("XCOPY descriptor list length %u exceeds maximum %u\n", + tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } inline_dl = get_unaligned_be32(&p[12]); if (inline_dl != 0) { @@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) goto out; } + if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) { + pr_err("XCOPY parameter truncation: data length %u too small " + "for tdll: %hu sdll: %u inline_dl: %u\n", + se_cmd->data_length, tdll, sdll, inline_dl); + ret = TCM_PARAMETER_LIST_LENGTH_ERROR; + goto out; + } + pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); + /* + * skip over the target descriptors until segment descriptors + * have been passed - CSCD ids are needed to determine src and dest. + */ + seg_desc = &p[16] + tdll; + + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, + sdll, &ret); + if (rc <= 0) + goto out; + + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, + rc * XCOPY_SEGMENT_DESC_LEN); + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); if (rc <= 0) goto out; @@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * XCOPY_TARGET_DESC_LEN); - seg_desc = &p[16]; - seg_desc += (rc * XCOPY_TARGET_DESC_LEN); - - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); - if (rc <= 0) { - xcopy_pt_undepend_remotedev(xop); - goto out; - } transport_kunmap_data_sg(se_cmd); - pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, - rc * XCOPY_SEGMENT_DESC_LEN); INIT_WORK(&xop->xop_work, target_xcopy_do_work); queue_work(xcopy_wq, &xop->xop_work); return TCM_NO_SENSE; diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 4d3d4dd060f2..7c0b105cbe1b 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -1,10 +1,17 @@ #include <target/target_core_base.h> +#define XCOPY_HDR_LEN 16 #define XCOPY_TARGET_DESC_LEN 32 #define XCOPY_SEGMENT_DESC_LEN 28 #define XCOPY_NAA_IEEE_REGEX_LEN 16 #define XCOPY_MAX_SECTORS 1024 +/* + * SPC4r37 6.4.6.1 + * Table 150 — CSCD descriptor ID values + */ +#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF + enum xcopy_origin_list { XCOL_SOURCE_RECV_OP = 0x01, XCOL_DEST_RECV_OP = 0x02, diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index b811b0fb61b1..4c7796512453 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -118,12 +118,12 @@ struct rockchip_tsadc_chip { void (*control)(void __iomem *reg, bool on); /* Per-sensor methods */ - int (*get_temp)(struct chip_tsadc_table table, + int (*get_temp)(const struct chip_tsadc_table *table, int chn, void __iomem *reg, int *temp); - void 
(*set_alarm_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); - void (*set_tshut_temp)(struct chip_tsadc_table table, - int chn, void __iomem *reg, int temp); + int (*set_alarm_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); + int (*set_tshut_temp)(const struct chip_tsadc_table *table, + int chn, void __iomem *reg, int temp); void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); /* Per-table methods */ @@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = { {3452, 115000}, {3437, 120000}, {3421, 125000}, + {0, 125000}, }; static const struct tsadc_table rk3368_code_table[] = { @@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = { {TSADCV3_DATA_MASK, 125000}, }; -static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, +static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table, int temp) { int high, low, mid; - u32 error = 0; + unsigned long num; + unsigned int denom; + u32 error = table->data_mask; low = 0; - high = table.length - 1; + high = (table->length - 1) - 1; /* ignore the last check for table */ mid = (high + low) / 2; /* Return mask code data when the temp is over table range */ - if (temp < table.id[low].temp || temp > table.id[high].temp) { - error = table.data_mask; + if (temp < table->id[low].temp || temp > table->id[high].temp) goto exit; - } while (low <= high) { - if (temp == table.id[mid].temp) - return table.id[mid].code; - else if (temp < table.id[mid].temp) + if (temp == table->id[mid].temp) + return table->id[mid].code; + else if (temp < table->id[mid].temp) high = mid - 1; else low = mid + 1; mid = (low + high) / 2; } + /* + * The conversion code granularity provided by the table. Let's + * assume that the relationship between temperature and + * analog value between 2 table entries is linear and interpolate + * to produce less granular result. 
+ */ + num = abs(table->id[mid + 1].code - table->id[mid].code); + num *= temp - table->id[mid].temp; + denom = table->id[mid + 1].temp - table->id[mid].temp; + + switch (table->mode) { + case ADC_DECREMENT: + return table->id[mid].code - (num / denom); + case ADC_INCREMENT: + return table->id[mid].code + (num / denom); + default: + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return error; + } + exit: - pr_err("Invalid the conversion, error=%d\n", error); + pr_err("%s: invalid temperature, temp=%d error=%d\n", + __func__, temp, error); return error; } -static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, - int *temp) +static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table, + u32 code, int *temp) { unsigned int low = 1; - unsigned int high = table.length - 1; + unsigned int high = table->length - 1; unsigned int mid = (low + high) / 2; unsigned int num; unsigned long denom; - WARN_ON(table.length < 2); + WARN_ON(table->length < 2); - switch (table.mode) { + switch (table->mode) { case ADC_DECREMENT: - code &= table.data_mask; - if (code < table.id[high].code) + code &= table->data_mask; + if (code <= table->id[high].code) return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code >= table.id[mid].code && - code < table.id[mid - 1].code) + if (code >= table->id[mid].code && + code < table->id[mid - 1].code) break; - else if (code < table.id[mid].code) + else if (code < table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; case ADC_INCREMENT: - code &= table.data_mask; - if (code < table.id[low].code) + code &= table->data_mask; + if (code < table->id[low].code) return -EAGAIN; /* Incorrect reading */ while (low <= high) { - if (code <= table.id[mid].code && - code > table.id[mid - 1].code) + if (code <= table->id[mid].code && + code > table->id[mid - 1].code) break; - else if (code > table.id[mid].code) + else if (code > table->id[mid].code) low = mid + 1; else high = mid - 1; @@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, } break; default: - pr_err("Invalid the conversion table\n"); + pr_err("%s: unknown table mode: %d\n", __func__, table->mode); + return -EINVAL; } /* @@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, * temperature between 2 table entries is linear and interpolate * to produce less granular result. 
*/ - num = table.id[mid].temp - table.id[mid - 1].temp; - num *= abs(table.id[mid - 1].code - code); - denom = abs(table.id[mid - 1].code - table.id[mid].code); - *temp = table.id[mid - 1].temp + (num / denom); + num = table->id[mid].temp - table->id[mid - 1].temp; + num *= abs(table->id[mid - 1].code - code); + denom = abs(table->id[mid - 1].code - table->id[mid].code); + *temp = table->id[mid - 1].temp + (num / denom); return 0; } @@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable) writel_relaxed(val, regs + TSADCV2_AUTO_CON); } -static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, +static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table, int chn, void __iomem *regs, int *temp) { u32 val; @@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, return rk_tsadcv2_code_to_temp(table, val, temp); } -static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { - u32 alarm_value, int_en; + u32 alarm_value; + u32 int_en, int_clr; + + /* + * In some cases, some sensors didn't need the trip points, the + * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm + * in the end, ignore this case and disable the high temperature + * interrupt. + */ + if (temp == INT_MAX) { + int_clr = readl_relaxed(regs + TSADCV2_INT_EN); + int_clr &= ~TSADCV2_INT_SRC_EN(chn); + writel_relaxed(int_clr, regs + TSADCV2_INT_EN); + return 0; + } /* Make sure the value is valid */ alarm_value = rk_tsadcv2_temp_to_code(table, temp); - if (alarm_value == table.data_mask) - return; + if (alarm_value == table->data_mask) + return -ERANGE; - writel_relaxed(alarm_value & table.data_mask, + writel_relaxed(alarm_value & table->data_mask, regs + TSADCV2_COMP_INT(chn)); int_en = readl_relaxed(regs + TSADCV2_INT_EN); int_en |= TSADCV2_INT_SRC_EN(chn); writel_relaxed(int_en, regs + TSADCV2_INT_EN); + + return 0; } -static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, - int chn, void __iomem *regs, int temp) +static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table, + int chn, void __iomem *regs, int temp) { u32 tshut_value, val; /* Make sure the value is valid */ tshut_value = rk_tsadcv2_temp_to_code(table, temp); - if (tshut_value == table.data_mask) - return; + if (tshut_value == table->data_mask) + return -ERANGE; writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); /* TSHUT will be valid */ val = readl_relaxed(regs + TSADCV2_AUTO_CON); writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); + + return 0; } static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, @@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high) dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", __func__, sensor->id, low, high); - tsadc->set_alarm_temp(tsadc->table, - sensor->id, thermal->regs, high); - - return 0; + return tsadc->set_alarm_temp(&tsadc->table, + sensor->id, thermal->regs, high); } static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) @@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; int retval; - retval = tsadc->get_temp(tsadc->table, + retval = tsadc->get_temp(&tsadc->table, sensor->id, thermal->regs, out_temp); dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 
sensor->id, *out_temp, retval); @@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev, int error; tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, + + error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); sensor->thermal = thermal; sensor->id = id; @@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev) thermal->chip->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); - thermal->chip->set_tshut_temp(thermal->chip->table, + + error = thermal->chip->set_tshut_temp(&thermal->chip->table, id, thermal->regs, thermal->tshut_temp); + if (error) + dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n", + __func__, thermal->tshut_temp, error); } thermal->chip->control(thermal->regs, true); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 641faab6e24b..655591316a88 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -799,6 +799,11 @@ static void thermal_release(struct device *dev) if (!strncmp(dev_name(dev), "thermal_zone", sizeof("thermal_zone") - 1)) { tz = to_thermal_zone(dev); + kfree(tz->trip_type_attrs); + kfree(tz->trip_temp_attrs); + kfree(tz->trip_hyst_attrs); + kfree(tz->trips_attribute_group.attrs); + kfree(tz->device.groups); kfree(tz); } else if (!strncmp(dev_name(dev), "cooling_device", sizeof("cooling_device") - 1)) { @@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, 0); - kfree(tz->trip_type_attrs); - kfree(tz->trip_temp_attrs); - kfree(tz->trip_hyst_attrs); - kfree(tz->trips_attribute_group.attrs); thermal_set_governor(tz, NULL); thermal_remove_hwmon_sysfs(tz); @@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) idr_destroy(&tz->idr); mutex_destroy(&tz->lock); device_unregister(&tz->device); - kfree(tz->device.groups); } EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c index 541af5946203..c4a508a124dc 100644 --- a/drivers/thermal/thermal_hwmon.c +++ b/drivers/thermal/thermal_hwmon.c @@ -59,14 +59,6 @@ static LIST_HEAD(thermal_hwmon_list); static DEFINE_MUTEX(thermal_hwmon_list_lock); static ssize_t -name_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev); - return sprintf(buf, "%s\n", hwmon->type); -} -static DEVICE_ATTR_RO(name); - -static ssize_t temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) { int temperature; @@ -165,15 +157,12 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) INIT_LIST_HEAD(&hwmon->tz_list); strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); - hwmon->device = hwmon_device_register(NULL); + hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type, + hwmon, NULL, NULL); if (IS_ERR(hwmon->device)) { result = PTR_ERR(hwmon->device); goto free_mem; } - dev_set_drvdata(hwmon->device, hwmon); - result = device_create_file(hwmon->device, &dev_attr_name); - if (result) - goto free_mem; register_sys_interface: temp = kzalloc(sizeof(*temp), GFP_KERNEL); @@ -222,10 +211,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) free_temp_mem: kfree(temp); unregister_name: - if (new_hwmon_device) { 
- device_remove_file(hwmon->device, &dev_attr_name); + if (new_hwmon_device) hwmon_device_unregister(hwmon->device); - } free_mem: if (new_hwmon_device) kfree(hwmon); @@ -267,7 +254,6 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) list_del(&hwmon->node); mutex_unlock(&thermal_hwmon_list_lock); - device_remove_file(hwmon->device, &dev_attr_name); hwmon_device_unregister(hwmon->device); kfree(hwmon); } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 61569a765d9e..76e03a7de9cc 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -675,7 +675,7 @@ static struct console univ8250_console = { .device = uart_console_device, .setup = univ8250_console_setup, .match = univ8250_console_match, - .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV, + .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, }; diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index aa0166b6d450..116436b7fa52 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) static void serial8250_io_resume(struct pci_dev *dev) { struct serial_private *priv = pci_get_drvdata(dev); - const struct pciserial_board *board; + struct serial_private *new; if (!priv) return; - board = priv->board; - kfree(priv); - priv = pciserial_init_ports(dev, board); - - if (!IS_ERR(priv)) { - pci_set_drvdata(dev, priv); + new = pciserial_init_ports(dev, priv->board); + if (!IS_ERR(new)) { + pci_set_drvdata(dev, new); + kfree(priv); } } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fe4399b41df6..c13fec451d03 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p) * Enable previously disabled RX interrupts. */ if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { - serial8250_clear_fifos(p); + serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; serial_port_out(&p->port, UART_IER, p->ier); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 168b10cad47b..fabbe76203bb 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port) /* disable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); } + + /* + * Disable the transmitter. + * This is mandatory when DMA is used, otherwise the DMA buffer + * is fully transmitted. 
+ */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); + /* Disable interrupts */ atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); @@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port) /* Enable interrupts */ atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); + + /* re-enable the transmitter */ + atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); } /* @@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg) */ if (!uart_circ_empty(xmit)) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); + else if ((port->rs485.flags & SER_RS485_ENABLED) && + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) { + /* DMA done, stop TX, start RX for RS485 */ + atmel_start_rx(port); + } spin_unlock_irqrestore(&port->lock, flags); } @@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port) desc->callback = atmel_complete_tx_dma; desc->callback_param = atmel_port; atmel_port->cookie_tx = dmaengine_submit(desc); - - } else { - if (port->rs485.flags & SER_RS485_ENABLED) { - /* DMA done, stop TX, start RX for RS485 */ - atmel_start_rx(port); - } } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 52bbd27e93ae..701c085bb19b 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, - .evbit = { BIT_MASK(EV_KEY) }, - .keybit = { BIT_MASK(KEY_LEFTALT) }, + .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, }, { }, }; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 9548d3e03453..302b8f5f7d27 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -513,8 +513,8 @@ struct dwc2_core_params { /* Gadget parameters */ bool g_dma; bool g_dma_desc; - u16 g_rx_fifo_size; - u16 g_np_tx_fifo_size; + u32 g_rx_fifo_size; + u32 g_np_tx_fifo_size; u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; }; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index c55db4aa54d6..77c5fcf3a5bf 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* keep other bits untouched (so e.g. 
forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || @@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, __func__, epctrl, epctrl_reg); /* Allocate DMA descriptor chain for non-ctrl endpoints */ - if (using_desc_dma(hsotg)) { - hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, + if (using_desc_dma(hsotg) && !hs_ep->desc_list) { + hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), &hs_ep->desc_list_dma, GFP_ATOMIC); @@ -3872,7 +3872,7 @@ error1: error2: if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * + dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * sizeof(struct dwc2_dma_desc), hs_ep->desc_list, hs_ep->desc_list_dma); hs_ep->desc_list = NULL; @@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep) return -EINVAL; } - /* Remove DMA memory allocated for non-control Endpoints */ - if (using_desc_dma(hsotg)) { - dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * - sizeof(struct dwc2_dma_desc), - hs_ep->desc_list, hs_ep->desc_list_dma); - hs_ep->desc_list = NULL; - } - epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); spin_lock_irqsave(&hsotg->lock, flags); @@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 911c3b36ac06..46d0ad5105e4 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (!HCD_HW_ACCESSIBLE(hcd)) goto unlock; + if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) + goto unlock; + if (!hsotg->params.hibernation) goto skip_power_saving; @@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb, { #ifdef VERBOSE_DEBUG struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); - char *pipetype; - char *speed; + char *pipetype = NULL; + char *speed = NULL; dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); dev_vdbg(hsotg->dev, " Device address: %d\n", diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 11fe68a4627b..bcd1e19b4076 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param, } /** - * dwc2_set_param_u16() - Set a u16 parameter + * dwc2_set_param_u32() - Set a u32 parameter * * See dwc2_set_param(). */ -static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, +static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param, bool lookup, char *property, u16 legacy, u16 def, u16 min, u16 max) { dwc2_set_param(hsotg, param, lookup, property, - legacy, def, min, max, 2); + legacy, def, min, max, 4); } /** @@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg, * auto-detect if the hardware does not support the * default. 
*/ - dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, + dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size, true, "g-rx-fifo-size", 2048, hw->rx_fifo_size, 16, hw->rx_fifo_size); - dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, + dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size, true, "g-np-tx-fifo-size", 1024, hw->dev_nperio_tx_fifo_size, 16, hw->dev_nperio_tx_fifo_size); diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index e27899bb5706..e956306d9b0f 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev) exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); if (IS_ERR(exynos->axius_clk)) { dev_err(dev, "no AXI UpScaler clk specified\n"); - return -ENODEV; + ret = -ENODEV; + goto axius_clk_err; } clk_prepare_enable(exynos->axius_clk); } else { @@ -196,6 +197,7 @@ err3: regulator_disable(exynos->vdd33); err2: clk_disable_unprepare(exynos->axius_clk); +axius_clk_err: clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); return ret; diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 002822d98fda..49d685ad0da9 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; - kfree(cdev->os_desc_req); + usb_ep_free_request(ep0, cdev->os_desc_req); goto end; } cdev->os_desc_req->context = cdev; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 5e746adc8a2d..5490fc51638e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); @@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func) __ffs_epfile_read_buffer_free(epfile); ++epfile; } - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } @@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while(count--) { struct usb_endpoint_descriptor *ds; int desc_idx; @@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func) ++ep; ++epfile; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); return ret; @@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c, /* cleanup after autoconfig */ spin_lock_irqsave(&func->ffs->eps_lock, flags); - do { + while (count--) { if (ep->ep && ep->req) usb_ep_free_request(ep->ep, ep->req); ep->req = NULL; ++ep; - } while (--count); + } spin_unlock_irqrestore(&func->ffs->eps_lock, flags); kfree(func->eps); func->eps = NULL; diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index f3212db9bc37..12c7687216e6 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); goto err; } - ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); + sprintf(ep->name, "ep%d", ep->index); + 
ep->ep.name = ep->name; ep->ep_regs = udc->regs + USBA_EPT_BASE(i); ep->dma_regs = udc->regs + USBA_DMA_BASE(i); diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h index 3e1c9d589dfa..b03b2ebfc53a 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.h +++ b/drivers/usb/gadget/udc/atmel_usba_udc.h @@ -280,6 +280,7 @@ struct usba_ep { void __iomem *ep_regs; void __iomem *dma_regs; void __iomem *fifo; + char name[8]; struct usb_ep ep; struct usba_udc *udc; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index ddfab301e366..e5834dd9bcde 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev) return -ENODEV; /* Try to set 64-bit DMA first */ - if (WARN_ON(!pdev->dev.dma_mask)) + if (!pdev->dev.dma_mask) /* Platform did not initialize dma_mask */ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 25f522b09dd9..e32029a31ca4 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); ep->stop_cmds_pending--; - if (xhci->xhc_state & XHCI_STATE_REMOVING) { - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } - if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Stop EP timer ran, but another timer marked " - "xHCI as DYING, exiting."); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Stop EP timer ran, but no command pending, " diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0c8deb9ed42d..9a0ec116654a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_urb_free_priv(urb_priv); return ret; } - if ((xhci->xhc_state & XHCI_STATE_DYING) || - (xhci->xhc_state & XHCI_STATE_HALTED)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Ep 0x%x: URB %p to be canceled on " - "non-responsive xHCI host.", - urb->ep->desc.bEndpointAddress, urb); - /* Let the stop endpoint command watchdog timer (which set this - * state) finish cleaning up the endpoint TD lists. We must - * have caught it in the middle of dropping a lock and giving - * back an URB. 
- */ - goto done; - } ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index 4fef50e5c8c1..dd70c88419d2 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused) unsigned i; seq_printf(s, "MUSB (M)HDRC Register Dump\n"); + pm_runtime_get_sync(musb->controller); for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) { switch (musb_regmap[i].size) { @@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused) } } + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return 0; } @@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused) struct musb *musb = s->private; unsigned test; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); if (test & MUSB_TEST_FORCE_HOST) seq_printf(s, "force host\n"); @@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file, u8 test; char buf[18]; + pm_runtime_get_sync(musb->controller); test = musb_readb(musb->mregs, MUSB_TESTMODE); if (test) { dev_err(musb->controller, "Error: test mode is already set. " "Please do USB Bus Reset to start a new test.\n"); - return count; + goto ret; } memset(buf, 0x00, sizeof(buf)); @@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file, musb_writeb(musb->mregs, MUSB_TESTMODE, test); +ret: + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return count; } @@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused) switch (musb->xceiv->otg->state) { case OTG_STATE_A_HOST: case OTG_STATE_A_WAIT_BCON: + pm_runtime_get_sync(musb->controller); + reg = musb_readb(musb->mregs, MUSB_DEVCTL); connect = reg & MUSB_DEVCTL_SESSION ? 
1 : 0; + + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); break; default: connect = -1; @@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; + pm_runtime_get_sync(musb->controller); if (!strncmp(buf, "0", 1)) { switch (musb->xceiv->otg->state) { case OTG_STATE_A_HOST: @@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file, } } + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); return count; } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 2597b83a8ae2..95aa5233726c 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -95,6 +95,7 @@ struct ch341_private { unsigned baud_rate; /* set baud rate */ u8 line_control; /* set line control value RTS/DTR */ u8 line_status; /* active status of modem control inputs */ + u8 lcr; }; static void ch341_set_termios(struct tty_struct *tty, @@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, value, index, NULL, 0, DEFAULT_TIMEOUT); + if (r < 0) + dev_err(&dev->dev, "failed to send control message: %d\n", r); return r; } @@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev, r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, value, index, buf, bufsize, DEFAULT_TIMEOUT); - return r; + if (r < bufsize) { + if (r >= 0) { + dev_err(&dev->dev, + "short control message received (%d < %u)\n", + r, bufsize); + r = -EIO; + } + + dev_err(&dev->dev, "failed to receive control message: %d\n", + r); + return r; + } + + return 0; } -static int ch341_init_set_baudrate(struct usb_device *dev, - struct ch341_private *priv, unsigned ctrl) +static int ch341_set_baudrate_lcr(struct usb_device *dev, + struct ch341_private *priv, u8 lcr) { short a; int r; @@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev, factor = 0x10000 - factor; a = (factor & 0xff00) | divisor; - /* 0x9c is "enable SFR_UART Control register and timer" */ - r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, - 0x9c | (ctrl << 8), a | 0x80); + /* + * CH341A buffers data until a full endpoint-size packet (32 bytes) + * has been received unless bit 7 is set. 
+ */ + a |= BIT(7); + + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a); + if (r) + return r; + + r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr); + if (r) + return r; return r; } @@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; unsigned long flags; buffer = kmalloc(size, GFP_KERNEL); @@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - /* setup the private status if available */ - if (r == 2) { - r = 0; - spin_lock_irqsave(&priv->lock, flags); - priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; - spin_unlock_irqrestore(&priv->lock, flags); - } else - r = -EPROTO; + spin_lock_irqsave(&priv->lock, flags); + priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; + spin_unlock_irqrestore(&priv->lock, flags); out: kfree(buffer); return r; @@ -200,9 +221,9 @@ out: kfree(buffer); static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) { + const unsigned int size = 2; char *buffer; int r; - const unsigned size = 8; buffer = kmalloc(size, GFP_KERNEL); if (!buffer) @@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) if (r < 0) goto out; - r = ch341_init_set_baudrate(dev, priv, 0); + r = ch341_set_baudrate_lcr(dev, priv, priv->lcr); if (r < 0) goto out; @@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port) spin_lock_init(&priv->lock); priv->baud_rate = DEFAULT_BAUD_RATE; - priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; r = ch341_configure(port->serial->dev, priv); if (r < 0) @@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) r = ch341_configure(serial->dev, priv); if (r) - goto out; + return r; if (tty) ch341_set_termios(tty, port, NULL); @@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) if (r) { dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n", __func__, r); - goto out; + return r; } r = usb_serial_generic_open(tty, port); + if (r) + goto err_kill_interrupt_urb; + + return 0; + +err_kill_interrupt_urb: + usb_kill_urb(port->interrupt_in_urb); -out: return r; + return r; } /* Old_termios contains the original termios settings and @@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty, baud_rate = tty_get_baud_rate(tty); - priv->baud_rate = baud_rate; ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX; switch (C_CSIZE(tty)) { @@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty, ctrl |= CH341_LCR_STOP_BITS_2; if (baud_rate) { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); - r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl); + priv->baud_rate = baud_rate; + + r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl); if (r < 0 && old_termios) { priv->baud_rate = tty_termios_baud_rate(old_termios); tty_termios_copy_hw(&tty->termios, old_termios); + } else if (r == 0) { + priv->lcr = ctrl; } - } else { - spin_lock_irqsave(&priv->lock, flags); - priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); - spin_unlock_irqrestore(&priv->lock, flags); } - ch341_set_handshake(port->serial->dev, priv->line_control); + spin_lock_irqsave(&priv->lock, flags); + if 
(C_BAUD(tty) == B0) + priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); + else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) + priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); + spin_unlock_irqrestore(&priv->lock, flags); + ch341_set_handshake(port->serial->dev, priv->line_control); } static void ch341_break_ctl(struct tty_struct *tty, int break_state) @@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty) static int ch341_reset_resume(struct usb_serial *serial) { - struct ch341_private *priv; - - priv = usb_get_serial_port_data(serial->port[0]); + struct usb_serial_port *port = serial->port[0]; + struct ch341_private *priv = usb_get_serial_port_data(port); + int ret; /* reconfigure ch341 serial port after bus-reset */ ch341_configure(serial->dev, priv); - return 0; + if (tty_port_initialized(&port->port)) { + ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); + if (ret) { + dev_err(&port->dev, "failed to submit interrupt urb: %d\n", + ret); + return ret; + } + } + + return usb_serial_generic_resume(serial); } static struct usb_serial_driver ch341_device = { diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 0ee190fc1bf8..6cb45757818f 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, status_buf, KLSI_STATUSBUF_LEN, 10000 ); - if (rc < 0) - dev_err(&port->dev, "Reading line status failed (error = %d)\n", - rc); - else { + if (rc != KLSI_STATUSBUF_LEN) { + dev_err(&port->dev, "reading line status failed: %d\n", rc); + if (rc >= 0) + rc = -EIO; + } else { status = get_unaligned_le16(status_buf); dev_info(&port->serial->dev->dev, "read status %x %x\n", diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index 79451f7ef1b7..062c205f0046 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, struct scatterlist sg[4], sg_dst; void *dst_buf; size_t dst_size; - const u8 bzero[16] = { 0 }; u8 iv[crypto_skcipher_ivsize(tfm_cbc)]; size_t zero_padding; @@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc, sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1)); sg_set_buf(&sg[2], b, blen); /* 0 if well behaved :) */ - sg_set_buf(&sg[3], bzero, zero_padding); + sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0); sg_init_one(&sg_dst, dst_buf, dst_size); skcipher_request_set_tfm(req, tfm_cbc); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 9266271a787a..b3cc33fa6d26 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -36,7 +36,6 @@ #include <linux/uaccess.h> #include <linux/vfio.h> #include <linux/workqueue.h> -#include <linux/pid_namespace.h> #include <linux/mdev.h> #include <linux/notifier.h> @@ -495,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, unsigned long *pfn_base, bool do_accounting) { unsigned long limit; - bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns, - CAP_IPC_LOCK); + bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK); struct mm_struct *mm; int ret; bool rsvd; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 253310cdaaca..fd6c8b66f06f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) struct iov_iter 
out_iter, in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; - unsigned out, in; + unsigned int out = 0, in = 0; int head, ret, prot_bytes; size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); size_t out_size, in_size; @@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { NULL, }; -static struct target_core_fabric_ops vhost_scsi_ops = { +static const struct target_core_fabric_ops vhost_scsi_ops = { .module = THIS_MODULE, .name = "vhost", .get_fabric_name = vhost_scsi_get_fabric_name, diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index bbbf588540ed..ce5e63d2c66a 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work) static int vhost_vsock_start(struct vhost_vsock *vsock) { + struct vhost_virtqueue *vq; size_t i; int ret; @@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) goto err; for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); if (!vhost_vq_access_ok(vq)) { ret = -EFAULT; - mutex_unlock(&vq->mutex); goto err_vq; } if (!vq->private_data) { vq->private_data = vsock; - vhost_vq_init_access(vq); + ret = vhost_vq_init_access(vq); + if (ret) + goto err_vq; } mutex_unlock(&vq->mutex); @@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) return 0; err_vq: + vq->private_data = NULL; + mutex_unlock(&vq->mutex); + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vq->private_data = NULL; diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index f89245b8ba8e..68a113594808 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c @@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap) int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; - size = to->len - tooff; - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; - if (size <= 0) + if (fromoff >= from->len || tooff >= to->len) + return -EINVAL; + + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) return -EINVAL; size *= sizeof(u16); @@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) { - int tooff = 0, fromoff = 0; - int size; + unsigned int tooff = 0, fromoff = 0; + size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; - size = to->len - tooff; - if (size > (int) (from->len - fromoff)) - size = from->len - fromoff; - if (size <= 0) + if (fromoff >= from->len || tooff >= to->len) + return -EINVAL; + + size = min_t(size_t, to->len - tooff, from->len - fromoff); + if (size == 0) return -EINVAL; size *= sizeof(u16); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index d47a2fcef818..c71fde5fe835 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -59,6 +59,7 @@ #define pr_fmt(fmt) "virtio-mmio: " fmt #include <linux/acpi.h> +#include <linux/dma-mapping.h> #include <linux/highmem.h> #include <linux/interrupt.h> #include 
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	struct virtio_mmio_device *vm_dev;
 	struct resource *mem;
 	unsigned long magic;
+	int rc;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-	if (vm_dev->version == 1)
+	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+		/*
+		 * In the legacy case, ensure our coherently-allocated virtio
+		 * ring will be at an address expressible as a 32-bit PFN.
+		 */
+		if (!rc)
+			dma_set_coherent_mask(&pdev->dev,
+					      DMA_BIT_MASK(32 + PAGE_SHIFT));
+	} else {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	}
+
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
 	platform_set_drvdata(pdev, vm_dev);
 
 	return register_virtio_device(&vm_dev->vdev);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 409aeaa49246..7e38ed79c3fc 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
 	if (xen_domain())
 		return true;
 
+	/*
+	 * On ARM-based machines, the DMA ops will do the right thing,
+	 * so always use them with legacy devices.
+	 */
+	if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+		return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
 	return false;
 }
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 6b5ee896af63..7cc51223db1c 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 
-	*pci_base = (dma_addr_t)vme_base + pci_offset;
+	*pci_base = (dma_addr_t)*vme_base + pci_offset;
 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 
 	*enabled = 0;
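The virtio_mmio_probe() change above is a conventional DMA-mask fallback ladder: ask for 64-bit DMA first (restricting the coherent mask to a 32-bit PFN for legacy version-1 devices, whose ring address register holds a page frame number), retry at 32 bits if that fails, and merely warn rather than abort the probe if neither mask is accepted. A minimal sketch of the same ladder for a generic device (illustrative only, not the driver code itself; assumes <linux/dma-mapping.h>):

/*
 * Illustrative fallback ladder: prefer the widest mask, narrow it on
 * failure, and treat total failure as non-fatal.
 */
static void example_dma_mask_setup(struct device *dev)
{
	int rc;

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(dev, "no usable DMA mask, continuing without one\n");
}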
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@ static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
 	return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+	u8 pin;
+	int irq;
+
+	irq = pdev->irq;
+	if (irq < 16)
+		return irq; /* ISA IRQ */
+
+	pin = pdev->pin;
+
+	/* We don't know the GSI. Specify the PCI INTx line instead. */
+	return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+		((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+		((uint64_t)pdev->bus->number << 16) |
+		((uint64_t)(pdev->devfn & 0xff) << 8) |
+		((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+	xen_hvm_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+	return request_irq(pdev->irq, do_hvm_evtchn_intr,
+			   IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			   "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+	int err;
+
+	if (!xen_pv_domain())
+		return 0;
+
+	err = xen_set_callback_via(callback_via);
+	if (err) {
+		dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+		return err;
+	}
+	return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;
 
+	/*
+	 * Xen HVM guests always use the vector callback mechanism.
+	 * L1 Dom0 in a nested Xen environment is a PV guest inside an
+	 * HVM environment. It needs the platform-pci driver to get
+	 * notifications from L0 Xen, but it cannot use the vector callback
+	 * as it is not exported by L1 Xen.
+	 */
+	if (xen_pv_domain()) {
+		ret = xen_allocate_irq(pdev);
+		if (ret) {
+			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+			goto out;
+		}
+		callback_via = get_callback_via(pdev);
+		ret = xen_set_callback_via(callback_via);
+		if (ret) {
+			dev_warn(&pdev->dev, "Unable to set the evtchn callback err=%d\n", ret);
+			goto out;
+		}
+	}
+
 	max_nr_gframes = gnttab_max_grant_frames();
 	grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
 	ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
 	.name =           DRV_NAME,
 	.probe =          platform_pci_probe,
 	.id_table =       platform_pci_tbl,
+#ifdef CONFIG_PM
+	.resume_early =   platform_pci_resume,
+#endif
 };
 
 builtin_pci_driver(platform_driver);
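The callback_via value built by get_callback_via() above packs the platform device's PCI INTx routing into the single 64-bit HVM parameter that xen_set_callback_via() hands to the hypervisor, so the same value can be cached and replayed on resume. An illustrative breakdown of the encoding, with hypothetical local variables standing in for the pdev fields (the type field sits in the topmost bits, at HVM_CALLBACK_VIA_TYPE_SHIFT):

/*
 * Sketch of the PCI-INTx encoding assembled above (not driver code):
 *
 *   top bits (>= HVM_CALLBACK_VIA_TYPE_SHIFT)  type, 0x01 = PCI INTx
 *   bits 32 and up                             PCI domain (segment)
 *   bits 23..16                                bus number
 *   bits 15..8                                 devfn (device/function)
 *   bits 1..0                                  INTx pin, 0-based (INTA..INTD)
 */
uint64_t via = ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) |
	       ((uint64_t)domain << 32) |
	       ((uint64_t)bus << 16) |
	       ((uint64_t)(devfn & 0xff) << 8) |
	       ((uint64_t)(pin - 1) & 3);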