Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c   |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c    |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c    | 12
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c       | 26
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c         |  4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c        | 21
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h            |  6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c            |  9
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c     | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c   | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c            |  6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h            |  9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c            | 18
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c            |  4
14 files changed, 133 insertions, 31 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef3032c..ec1282af2479 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee6466912497..77fdd9911c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d058ef24..425413fcaf02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
u64 wb_gpu_addr;
u32 *buf;
struct bonaire_mqd *mqd;
-
- gfx_v7_0_cp_compute_enable(adev, true);
+ struct amdgpu_ring *ring;
/* fix up chicken bits */
tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
/* init the queues. Just two for now. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj == NULL) {
r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
amdgpu_bo_unreserve(ring->mqd_obj);
ring->ready = true;
+ }
+
+ gfx_v7_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
r = amdgpu_ring_test_ring(ring);
if (r)
ring->ready = false;
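
Note: the gfx_v7_0_cp_compute_resume() hunks above split compute ring bring-up into two passes: program every compute ring's MQD while the compute CP is still disabled, enable the CP only once all queues are set up, and then ring-test each queue. A condensed sketch of the resulting flow (MQD allocation and register programming elided; not the literal function body):

    static int gfx_v7_0_cp_compute_resume_sketch(struct amdgpu_device *adev)
    {
            struct amdgpu_ring *ring;
            int i, r;

            /* pass 1: set up the MQD for every compute ring while the CP is disabled */
            for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                    ring = &adev->gfx.compute_ring[i];
                    /* ... allocate/pin mqd_obj and program the queue ... */
                    ring->ready = true;
            }

            /* only now let the compute CP start fetching from the queues */
            gfx_v7_0_cp_compute_enable(adev, true);

            /* pass 2: sanity-check each ring against the running CP */
            for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                    ring = &adev->gfx.compute_ring[i];
                    r = amdgpu_ring_test_ring(ring);
                    if (r)
                            ring->ready = false;
            }

            return 0;
    }
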
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9f7dafce3a4c..7bf90e9e6139 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
}
+static int imx_drm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check modeset again in case crtc_state->mode_changed is
+ * updated in plane's ->atomic_check callback.
+ */
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 08e188bc10fc..462056e4b9e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
+
+ drm_crtc_vblank_off(crtc);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ drm_crtc_vblank_on(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 4ad67d015ec7..29423e757d36 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
/*
- * since we cannot touch active IDMAC channels, we do not support
- * resizing the enabled plane or changing its format
+ * We support resizing active plane or changing its format by
+ * forcing CRTC mode change and disabling-enabling plane in plane's
+ * ->atomic_update callback.
*/
if (old_fb && (state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
fb->pixel_format != old_fb->pixel_format))
- return -EINVAL;
+ crtc_state->mode_changed = true;
eba = drm_plane_state_to_eba(state);
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (old_fb && fb->pitches[0] != old_fb->pitches[0])
- return -EINVAL;
+ crtc_state->mode_changed = true;
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
- return -EINVAL;
+ crtc_state->mode_changed = true;
}
return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
enum ipu_color_space ics;
if (old_state->fb) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
- return;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+
+ if (!crtc_state->mode_changed) {
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ return;
+ }
+
+ ipu_disable_plane(plane);
}
switch (ipu_plane->dp_flow) {
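
Note: the ipuv3-plane.c hunks replace hard -EINVAL rejections with crtc_state->mode_changed = true, so a resize or format change on an active plane is carried out as a full modeset and the plane is disabled and re-enabled in ->atomic_update. In isolation the pattern looks roughly like the sketch below; the drm_atomic_get_existing_crtc_state() lookup is an assumption about how crtc_state is obtained earlier in the real function:

    static int example_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_plane_state *state)
    {
            struct drm_plane_state *old_state = plane->state;
            struct drm_framebuffer *fb = state->fb;
            struct drm_framebuffer *old_fb = old_state->fb;
            struct drm_crtc_state *crtc_state;

            if (!fb || !state->crtc)
                    return 0;

            /* assumption: the CRTC state was already added to this atomic state */
            crtc_state = drm_atomic_get_existing_crtc_state(state->state,
                                                            state->crtc);
            if (WARN_ON(!crtc_state))
                    return -EINVAL;

            /*
             * A size or format change cannot be applied to a live channel,
             * so request a full modeset instead of returning -EINVAL.
             */
            if (old_fb && (state->src_w != old_state->src_w ||
                           state->src_h != old_state->src_h ||
                           fb->pixel_format != old_fb->pixel_format))
                    crtc_state->mode_changed = true;

            return 0;
    }
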
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b4bc7f1ef717..d0da52f2a806 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -157,6 +157,12 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+
+ /* task holding struct_mutex.. currently only used in submit path
+ * to detect and reject faults from copy_from_user() for submit
+ * ioctl.
+ */
+ struct task_struct *struct_mutex_task;
};
struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6cd4af443139..85f3047e05ae 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int ret;
+ /* This should only happen if userspace tries to pass a mmap'd
+ * but unfaulted gem bo vaddr into submit ioctl, triggering
+ * a page fault while struct_mutex is already held. This is
+ * not a valid use-case so just bail.
+ */
+ if (priv->struct_mutex_task == current)
+ return VM_FAULT_SIGBUS;
+
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 9766f9ae4b7d..880d6a9af7c8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
kfree(submit);
}
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;
spin_lock(&file->table_lock);
+ pagefault_disable();
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);
+out:
+ submit->nr_bos = i;
+
return ret;
}
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
return ret;
+ priv->struct_mutex_task = current;
+
submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
if (ret)
msm_gem_submit_free(submit);
out_unlock:
+ priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
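
Note: taken together, the msm changes implement a two-level defence. submit_lookup_objects() copies the submit descriptors with page faults disabled while file->table_lock is held, dropping the lock for a regular copy_from_user() only when a fault is actually needed, and msm_gem_fault() returns VM_FAULT_SIGBUS for faults coming from the task that holds struct_mutex, so a submit pointing at an unfaulted GEM mmap cannot deadlock the driver. A stripped-down sketch of the fast-path/slow-path copy idiom (hypothetical helper, not the driver's code):

    #include <linux/spinlock.h>
    #include <linux/uaccess.h>

    /* sketch: copy one userspace struct while a spinlock is held */
    static int copy_entry_locked(spinlock_t *lock, void *dst,
                                 const void __user *src, unsigned long len)
    {
            unsigned long left;

            if (!access_ok(VERIFY_READ, src, len))
                    return -EFAULT;

            /* fast path: non-faulting copy, safe under the spinlock */
            pagefault_disable();
            left = __copy_from_user_inatomic(dst, src, len);
            pagefault_enable();
            if (!left)
                    return 0;

            /* slow path: drop the lock so the page fault can be serviced */
            spin_unlock(lock);
            left = copy_from_user(dst, src, len);
            spin_lock(lock);

            return left ? -EFAULT : 0;
    }
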
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f2ad17aa33f0..dc57b628e074 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_pdev)
return false;
+ if (!parent_pdev->bridge_d3) {
+ /*
+ * Parent PCI bridge is currently not power managed.
+ * Since userspace can change these afterwards to be on
+ * the safe side we stick with _DSM and prevent usage of
+ * _PR3 from the bridge.
+ */
+ pci_d3cold_disable(pdev);
+ return false;
+ }
+
parent_adev = ACPI_COMPANION(&parent_pdev->dev);
if (!parent_adev)
return false;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8b42d31a7f0e..9ecef9385491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case DRM_VC4_PARAM_V3D_IDENT0:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
pm_runtime_put(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
pm_runtime_put(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
pm_runtime_put(&vc4->v3d->pdev->dev);
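
Note: the vc4_drv.c hunks account for pm_runtime_get_sync() returning a positive value when the device is already powered, so only negative returns are errors; the old `if (ret)` check turned a successful get into an ioctl failure. A minimal usage sketch with a generic device pointer (the register read is a placeholder, not the vc4 code):

    #include <linux/pm_runtime.h>

    static int read_hw_ident(struct device *dev, u32 *ident)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0)            /* 0 and positive values both mean "powered" */
                    return ret;

            *ident = read_ident_register(dev);      /* placeholder for V3D_READ() */

            pm_runtime_put(dev);
            return 0;
    }
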
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 489e3de0c050..428e24919ef1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
struct vc4_exec_info, head);
}
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->render_job_list))
+ return NULL;
+ return list_last_entry(&vc4->render_job_list,
+ struct vc4_exec_info, head);
+}
+
/**
* struct vc4_texture_sample_info - saves the offsets into the UBO for texture
* setup parameters.
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6155e8aca1c6..b262c5c26f10 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
- GFP_KERNEL);
+ exec->bo = drm_calloc_large(exec->bo_count,
+ sizeof(struct drm_gem_cma_object *));
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
spin_unlock(&file_priv->table_lock);
fail:
- kfree(handles);
- return 0;
+ drm_free_large(handles);
+ return ret;
}
static int
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
- temp = kmalloc(temp_size, GFP_KERNEL);
+ temp = drm_malloc_ab(temp_size, 1);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
ret = vc4_validate_shader_recs(dev, exec);
fail:
- kfree(temp);
+ drm_free_large(temp);
return ret;
}
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
- kfree(exec->bo);
+ drm_free_large(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
vc4->overflow_mem = NULL;
}
- vc4_bo_cache_destroy(dev);
-
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
+
+ vc4_bo_cache_destroy(dev);
}
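
Note: the allocation changes in vc4_gem.c matter because exec->bo_count and temp_size are derived from userspace-supplied ioctl arguments: drm_calloc_large()/drm_malloc_ab() fall back to vmalloc when the request is too large for kmalloc, and such allocations must be released with drm_free_large() rather than kfree(). The fail: path also now propagates ret instead of returning 0. A minimal pairing sketch (hypothetical helper names):

    #include <drm/drm_gem_cma_helper.h>
    #include <drm/drm_mem_util.h>

    /* sketch: table size is controlled by userspace, so avoid plain kcalloc() */
    static struct drm_gem_cma_object **alloc_bo_table(u32 count)
    {
            return drm_calloc_large(count, sizeof(struct drm_gem_cma_object *));
    }

    static void free_bo_table(struct drm_gem_cma_object **table)
    {
            drm_free_large(table);  /* handles both kmalloc and vmalloc backing */
    }
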
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b0104a346a74..094bc6a475c1 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
spin_lock_irqsave(&vc4->job_lock, irqflags);
current_exec = vc4_first_bin_job(vc4);
+ if (!current_exec)
+ current_exec = vc4_last_render_job(vc4);
if (current_exec) {
- vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+ vc4->overflow_mem->seqno = current_exec->seqno;
list_add_tail(&vc4->overflow_mem->unref_head,
&current_exec->unref_list);
vc4->overflow_mem = NULL;