Diffstat (limited to 'drivers/gpu/drm/vc4')
-rw-r--r--   drivers/gpu/drm/vc4/Kconfig                      |    1
-rw-r--r--   drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c   |   46
-rw-r--r--   drivers/gpu/drm/vc4/vc4_bo.c                     |    2
-rw-r--r--   drivers/gpu/drm/vc4/vc4_crtc.c                   |   33
-rw-r--r--   drivers/gpu/drm/vc4/vc4_drv.h                    |   27
-rw-r--r--   drivers/gpu/drm/vc4/vc4_dsi.c                    |    9
-rw-r--r--   drivers/gpu/drm/vc4/vc4_gem.c                    |  183
-rw-r--r--   drivers/gpu/drm/vc4/vc4_hdmi.c                   |  111
-rw-r--r--   drivers/gpu/drm/vc4/vc4_plane.c                  |    2
-rw-r--r--   drivers/gpu/drm/vc4/vc4_txp.c                    |    2
-rw-r--r--   drivers/gpu/drm/vc4/vc4_validate.c               |   11
11 files changed, 120 insertions, 307 deletions
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 6cc7b7e6294a..123ab0ce1781 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -14,6 +14,7 @@ config DRM_VC4
 	select DRM_DISPLAY_HDMI_HELPER
 	select DRM_DISPLAY_HDMI_STATE_HELPER
 	select DRM_DISPLAY_HELPER
+	select DRM_EXEC
 	select DRM_KMS_HELPER
 	select DRM_GEM_DMA_HELPER
 	select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
index 40a05869a50e..992e8f5c5c6e 100644
--- a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -724,7 +724,7 @@ static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
 static int vc4_pv_muxing_test_init(struct kunit *test)
 {
 	const struct pv_muxing_param *params = test->param_value;
-	struct drm_modeset_acquire_ctx *ctx;
+	struct drm_modeset_acquire_ctx ctx;
 	struct pv_muxing_priv *priv;
 	struct drm_device *drm;
 	struct vc4_dev *vc4;
@@ -737,13 +737,15 @@ static int vc4_pv_muxing_test_init(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 	priv->vc4 = vc4;
 
-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+	drm_modeset_acquire_init(&ctx, 0);
 
 	drm = &vc4->base;
-	priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);
 
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
 	return 0;
 }
 
@@ -782,7 +784,7 @@ static struct kunit_suite vc5_pv_muxing_test_suite = {
  */
 static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
 {
-	struct drm_modeset_acquire_ctx *ctx;
+	struct drm_modeset_acquire_ctx ctx;
 	struct drm_atomic_state *state;
 	struct vc4_crtc_state *new_vc4_crtc_state;
 	struct vc4_hvs_state *new_hvs_state;
@@ -795,11 +797,10 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
 	vc4 = vc5_mock_device(test);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+	drm_modeset_acquire_init(&ctx, 0);
 
 	drm = &vc4->base;
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -822,7 +823,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
 	ret = drm_atomic_helper_swap_state(state, false);
 	KUNIT_ASSERT_EQ(test, ret, 0);
 
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -843,6 +844,9 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
 	KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
 
 	KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 }
 
 /*
@@ -854,7 +858,7 @@ static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
  */
 static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 {
-	struct drm_modeset_acquire_ctx *ctx;
+	struct drm_modeset_acquire_ctx ctx;
 	struct drm_atomic_state *state;
 	struct vc4_crtc_state *new_vc4_crtc_state;
 	struct vc4_hvs_state *new_hvs_state;
@@ -867,11 +871,10 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 	vc4 = vc5_mock_device(test);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+	drm_modeset_acquire_init(&ctx, 0);
 
 	drm = &vc4->base;
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -905,7 +908,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 	ret = drm_atomic_helper_swap_state(state, false);
 	KUNIT_ASSERT_EQ(test, ret, 0);
 
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -929,6 +932,9 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 
 		KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
 	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 }
 
 /*
@@ -949,7 +955,7 @@ static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
 static void
 drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
 {
-	struct drm_modeset_acquire_ctx *ctx;
+	struct drm_modeset_acquire_ctx ctx;
 	struct drm_atomic_state *state;
 	struct vc4_crtc_state *new_vc4_crtc_state;
 	struct drm_device *drm;
@@ -959,11 +965,10 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
 	vc4 = vc5_mock_device(test);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
 
-	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
-	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+	drm_modeset_acquire_init(&ctx, 0);
 
 	drm = &vc4->base;
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
@@ -975,7 +980,7 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
 	ret = drm_atomic_helper_swap_state(state, false);
 	KUNIT_ASSERT_EQ(test, ret, 0);
 
-	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
+	state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
 
 	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
@@ -987,6 +992,9 @@ drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
 	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
 							    VC4_ENCODER_TYPE_HDMI0);
 	KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 }
 
 static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
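Note on the vc4_test_pv_muxing.c hunks above: the tests stop using the KUnit-managed acquire-context helper and manage a stack-allocated drm_modeset_acquire_ctx themselves, so they now have to drop locks and tear the context down explicitly. A minimal sketch of that pattern (assuming, as the tests do, that no explicit drm_modeset_backoff() loop is needed because the helpers retry internally):

	struct drm_modeset_acquire_ctx ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* ... build and check atomic state against &ctx ... */

	drm_modeset_drop_locks(&ctx);	/* release every lock taken via ctx */
	drm_modeset_acquire_fini(&ctx);	/* then destroy the context */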
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index fb450b6a4d44..7125773889f1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -1043,7 +1043,7 @@ static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int i;
 
-	del_timer(&vc4->bo_cache.time_timer);
+	timer_delete(&vc4->bo_cache.time_timer);
 	cancel_work_sync(&vc4->bo_cache.time_work);
 
 	vc4_bo_cache_purge(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index cf40a53ad42e..2a48038abe7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -884,11 +884,7 @@ struct vc4_async_flip_state {
 	struct drm_framebuffer *fb;
 	struct drm_framebuffer *old_fb;
 	struct drm_pending_vblank_event *event;
-
-	union {
-		struct dma_fence_cb fence;
-		struct vc4_seqno_cb seqno;
-	} cb;
+	struct dma_fence_cb cb;
 };
 
 /* Called when the V3D execution for the BO being flipped to is done, so that
@@ -919,10 +915,11 @@ vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state)
 	kfree(flip_state);
 }
 
-static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
+static void vc4_async_page_flip_complete_with_cleanup(struct dma_fence *fence,
+						      struct dma_fence_cb *cb)
 {
 	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, cb.seqno);
+		container_of(cb, struct vc4_async_flip_state, cb);
 	struct vc4_bo *bo = NULL;
 
 	if (flip_state->old_fb) {
@@ -932,6 +929,7 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
 	}
 
 	vc4_async_page_flip_complete(flip_state);
+	dma_fence_put(fence);
 
 	/*
 	 * Decrement the BO usecnt in order to keep the inc/dec
@@ -950,7 +948,7 @@ static void vc4_async_page_flip_fence_complete(struct dma_fence *fence,
 					       struct dma_fence_cb *cb)
 {
 	struct vc4_async_flip_state *flip_state =
-		container_of(cb, struct vc4_async_flip_state, cb.fence);
+		container_of(cb, struct vc4_async_flip_state, cb);
 
 	vc4_async_page_flip_complete(flip_state);
 	dma_fence_put(fence);
@@ -961,16 +959,15 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
 {
 	struct drm_framebuffer *fb = flip_state->fb;
 	struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+	dma_fence_func_t async_page_flip_complete_function;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct dma_fence *fence;
 	int ret;
 
-	if (vc4->gen == VC4_GEN_4) {
-		struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
-
-		return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
-					  vc4_async_page_flip_seqno_complete);
-	}
+	if (vc4->gen == VC4_GEN_4)
+		async_page_flip_complete_function = vc4_async_page_flip_complete_with_cleanup;
+	else
+		async_page_flip_complete_function = vc4_async_page_flip_fence_complete;
 
 	ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
 	if (ret)
@@ -978,14 +975,14 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
 
 	/* If there's no fence, complete the page flip immediately */
 	if (!fence) {
-		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+		async_page_flip_complete_function(fence, &flip_state->cb);
 		return 0;
 	}
 
 	/* If the fence has already been completed, complete the page flip */
-	if (dma_fence_add_callback(fence, &flip_state->cb.fence,
-				   vc4_async_page_flip_fence_complete))
-		vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence);
+	if (dma_fence_add_callback(fence, &flip_state->cb,
+				   async_page_flip_complete_function))
+		async_page_flip_complete_function(fence, &flip_state->cb);
 
 	return 0;
 }
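Note on the vc4_crtc.c hunks above: with the seqno callback gone, both GPU generations now ride the generic dma_fence callback path. A hedged sketch of that underlying pattern, with hypothetical names (my_state, my_flip_done, my_wait_for_bo are illustrative, not vc4 code):

	struct my_state {
		struct dma_fence_cb cb;
		/* ... flip bookkeeping ... */
	};

	static void my_flip_done(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct my_state *s = container_of(cb, struct my_state, cb);

		/* ... finish the flip using s ... */
		dma_fence_put(fence); /* drop the ref from dma_resv_get_singleton() */
	}

	static int my_wait_for_bo(struct dma_resv *resv, struct my_state *s)
	{
		struct dma_fence *fence;
		int ret;

		ret = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
		if (ret)
			return ret;

		/* No fence, or already signalled: run the callback right away.
		 * dma_fence_put(NULL) in the callback is safe. */
		if (!fence || dma_fence_add_callback(fence, &s->cb, my_flip_done))
			my_flip_done(fence, &s->cb);

		return 0;
	}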
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 4a078ffd9f82..221d8e01d539 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -186,11 +186,6 @@ struct vc4_dev {
 	 */
 	struct vc4_perfmon *active_perfmon;
 
-	/* List of struct vc4_seqno_cb for callbacks to be made from a
-	 * workqueue when the given seqno is passed.
-	 */
-	struct list_head seqno_cb_list;
-
 	/* The memory used for storing binner tile alloc, tile state,
 	 * and overflow memory allocations. This is freed when V3D
 	 * powers down.
@@ -247,16 +242,6 @@ struct vc4_dev {
 struct vc4_bo {
 	struct drm_gem_dma_object base;
 
-	/* seqno of the last job to render using this BO. */
-	uint64_t seqno;
-
-	/* seqno of the last job to use the RCL to write to this BO.
-	 *
-	 * Note that this doesn't include binner overflow memory
-	 * writes.
-	 */
-	uint64_t write_seqno;
-
 	bool t_format;
 
 	/* List entry for the BO's position in either
@@ -304,12 +289,6 @@ struct vc4_fence {
 #define to_vc4_fence(_fence)					\
 	container_of_const(_fence, struct vc4_fence, base)
 
-struct vc4_seqno_cb {
-	struct work_struct work;
-	uint64_t seqno;
-	void (*func)(struct vc4_seqno_cb *cb);
-};
-
 struct vc4_v3d {
 	struct vc4_dev *vc4;
 	struct platform_device *pdev;
@@ -695,9 +674,6 @@ struct vc4_exec_info {
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;
 
-	/* Latest write_seqno of any BO that binning depends on. */
-	uint64_t bin_dep_seqno;
-
 	struct dma_fence *fence;
 
 	/* Last current addresses the hardware was processing when the
@@ -1025,9 +1001,6 @@ void vc4_move_job_to_render(struct drm_device *dev,
 			    struct vc4_exec_info *exec);
 int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
 		       uint64_t timeout_ns, bool interruptible);
 void vc4_job_handle_completed(struct vc4_dev *vc4);
-int vc4_queue_seqno_cb(struct drm_device *dev,
-		       struct vc4_seqno_cb *cb, uint64_t seqno,
-		       void (*func)(struct vc4_seqno_cb *cb));
 int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 5eb293bdb363..779b22efe27b 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -800,7 +800,7 @@ dsi_esc_timing(u32 ns)
 }
 
 static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
-				   struct drm_bridge_state *state)
+				   struct drm_atomic_state *state)
 {
 	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
 	u32 disp0_ctrl;
@@ -811,7 +811,7 @@ static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
 }
 
 static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
-					struct drm_bridge_state *state)
+					struct drm_atomic_state *state)
 {
 	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
 	struct device *dev = &dsi->pdev->dev;
@@ -873,9 +873,8 @@ static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 }
 
 static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
-				      struct drm_bridge_state *old_state)
+				      struct drm_atomic_state *state)
 {
-	struct drm_atomic_state *state = old_state->base.state;
 	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
 	const struct drm_crtc_state *crtc_state;
 	struct device *dev = &dsi->pdev->dev;
@@ -1143,7 +1142,7 @@ static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
 }
 
 static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
-				  struct drm_bridge_state *old_state)
+				  struct drm_atomic_state *state)
 {
 	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
 	bool debug_dump_regs = false;
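Note on the vc4_dsi.c hunks: the atomic drm_bridge hooks now receive the full drm_atomic_state instead of a drm_bridge_state, so pre_enable no longer has to chase old_state->base.state. A hedged sketch of a hook under the new signature (my_bridge_pre_enable is illustrative, not vc4 code):

	static void my_bridge_pre_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
	{
		struct drm_bridge_state *bridge_state =
			drm_atomic_get_new_bridge_state(state, bridge);

		/* ... program the hardware from bridge_state / state ... */
	}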
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 22bccd69eb62..8125f87edc60 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -29,6 +29,7 @@
 #include <linux/sched/signal.h>
 #include <linux/dma-fence-array.h>
 
+#include <drm/drm_exec.h>
 #include <drm/drm_syncobj.h>
 
 #include "uapi/drm/vc4_drm.h"
@@ -552,45 +553,24 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
 }
 
 static void
-vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
+vc4_attach_fences(struct vc4_exec_info *exec)
 {
 	struct vc4_bo *bo;
 	unsigned i;
 
 	for (i = 0; i < exec->bo_count; i++) {
 		bo = to_vc4_bo(exec->bo[i]);
-		bo->seqno = seqno;
-
 		dma_resv_add_fence(bo->base.base.resv, exec->fence,
 				   DMA_RESV_USAGE_READ);
 	}
 
-	list_for_each_entry(bo, &exec->unref_list, unref_head) {
-		bo->seqno = seqno;
-	}
-
 	for (i = 0; i < exec->rcl_write_bo_count; i++) {
 		bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
-		bo->write_seqno = seqno;
-
 		dma_resv_add_fence(bo->base.base.resv, exec->fence,
 				   DMA_RESV_USAGE_WRITE);
 	}
 }
 
-static void
-vc4_unlock_bo_reservations(struct drm_device *dev,
-			   struct vc4_exec_info *exec,
-			   struct ww_acquire_ctx *acquire_ctx)
-{
-	int i;
-
-	for (i = 0; i < exec->bo_count; i++)
-		dma_resv_unlock(exec->bo[i]->resv);
-
-	ww_acquire_fini(acquire_ctx);
-}
-
 /* Takes the reservation lock on all the BOs being referenced, so that
  * at queue submit time we can update the reservations.
  *
@@ -599,70 +579,23 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
  * to vc4, so we don't attach dma-buf fences to them.
  */
 static int
-vc4_lock_bo_reservations(struct drm_device *dev,
-			 struct vc4_exec_info *exec,
-			 struct ww_acquire_ctx *acquire_ctx)
+vc4_lock_bo_reservations(struct vc4_exec_info *exec,
+			 struct drm_exec *exec_ctx)
 {
-	int contended_lock = -1;
-	int i, ret;
-	struct drm_gem_object *bo;
-
-	ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
-	if (contended_lock != -1) {
-		bo = exec->bo[contended_lock];
-		ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
-		if (ret) {
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	for (i = 0; i < exec->bo_count; i++) {
-		if (i == contended_lock)
-			continue;
-
-		bo = exec->bo[i];
-
-		ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
-		if (ret) {
-			int j;
-
-			for (j = 0; j < i; j++) {
-				bo = exec->bo[j];
-				dma_resv_unlock(bo->resv);
-			}
-
-			if (contended_lock != -1 && contended_lock >= i) {
-				bo = exec->bo[contended_lock];
-
-				dma_resv_unlock(bo->resv);
-			}
-
-			if (ret == -EDEADLK) {
-				contended_lock = i;
-				goto retry;
-			}
-
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
-	ww_acquire_done(acquire_ctx);
+	int ret;
 
 	/* Reserve space for our shared (read-only) fence references,
 	 * before we commit the CL to the hardware.
 	 */
-	for (i = 0; i < exec->bo_count; i++) {
-		bo = exec->bo[i];
+	drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, exec->bo_count);
+	drm_exec_until_all_locked(exec_ctx) {
+		ret = drm_exec_prepare_array(exec_ctx, exec->bo,
+					     exec->bo_count, 1);
+	}
 
-		ret = dma_resv_reserve_fences(bo->resv, 1);
-		if (ret) {
-			vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
-			return ret;
-		}
+	if (ret) {
+		drm_exec_fini(exec_ctx);
+		return ret;
 	}
 
 	return 0;
@@ -679,7 +612,7 @@ retry:
  */
 static int
 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
-		 struct ww_acquire_ctx *acquire_ctx,
+		 struct drm_exec *exec_ctx,
 		 struct drm_syncobj *out_sync)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -706,9 +639,9 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
 	if (out_sync)
 		drm_syncobj_replace_fence(out_sync, exec->fence);
 
-	vc4_update_bo_seqnos(exec, seqno);
+	vc4_attach_fences(exec);
 
-	vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
+	drm_exec_fini(exec_ctx);
 
 	list_add_tail(&exec->head, &vc4->bin_job_list);
 
@@ -904,12 +837,6 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 		goto fail;
 	}
 
-	/* Block waiting on any previous rendering into the CS's VBO,
-	 * IB, or textures, so that pixels are actually written by the
-	 * time we try to read them.
-	 */
-	ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
-
 fail:
 	kvfree(temp);
 	return ret;
@@ -968,7 +895,6 @@ void
 vc4_job_handle_completed(struct vc4_dev *vc4)
 {
 	unsigned long irqflags;
-	struct vc4_seqno_cb *cb, *cb_temp;
 
 	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
 		return;
@@ -985,46 +911,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
 		spin_lock_irqsave(&vc4->job_lock, irqflags);
 	}
 
-	list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
-		if (cb->seqno <= vc4->finished_seqno) {
-			list_del_init(&cb->work.entry);
-			schedule_work(&cb->work);
-		}
-	}
-
-	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-}
-
-static void vc4_seqno_cb_work(struct work_struct *work)
-{
-	struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
-
-	cb->func(cb);
-}
-
-int vc4_queue_seqno_cb(struct drm_device *dev,
-		       struct vc4_seqno_cb *cb, uint64_t seqno,
-		       void (*func)(struct vc4_seqno_cb *cb))
-{
-	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	unsigned long irqflags;
-
-	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
-		return -ENODEV;
-
-	cb->func = func;
-	INIT_WORK(&cb->work, vc4_seqno_cb_work);
-
-	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	if (seqno > vc4->finished_seqno) {
-		cb->seqno = seqno;
-		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
-	} else {
-		schedule_work(&cb->work);
-	}
 	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-
-	return 0;
 }
 
 /* Scheduled when any job has been completed, this walks the list of
@@ -1079,8 +966,10 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
 	struct drm_vc4_wait_bo *args = data;
-	struct drm_gem_object *gem_obj;
-	struct vc4_bo *bo;
+	unsigned long timeout_jiffies =
+		usecs_to_jiffies(div_u64(args->timeout_ns, 1000));
+	ktime_t start = ktime_get();
+	u64 delta_ns;
 
 	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
 		return -ENODEV;
@@ -1088,17 +977,18 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 	if (args->pad != 0)
 		return -EINVAL;
 
-	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
-	if (!gem_obj) {
-		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
-		return -EINVAL;
-	}
-	bo = to_vc4_bo(gem_obj);
+	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+				    true, timeout_jiffies);
 
-	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
-					      &args->timeout_ns);
+	/* Decrement the user's timeout, in case we got interrupted
+	 * such that the ioctl will be restarted.
+	 */
+	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+	if (delta_ns < args->timeout_ns)
+		args->timeout_ns -= delta_ns;
+	else
+		args->timeout_ns = 0;
 
-	drm_gem_object_put(gem_obj);
 	return ret;
 }
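Note on the vc4_wait_bo_ioctl() rewrite above: drm_gem_dma_resv_wait() bundles the handle lookup, the wait on the BO's reservation object, and the reference drop that the removed code did by hand; the true argument requests a wait on writers as well as readers. The ktime bookkeeping keeps the restartable-ioctl convention: if the wait is interrupted, the time already spent is subtracted from the user-supplied timeout so a restarted syscall does not wait from scratch. A condensed sketch of that convention (illustrative, mirroring the hunk):

	ktime_t start = ktime_get();
	u64 delta_ns;

	ret = /* ... the wait; may return -ERESTARTSYS ... */;

	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;	/* charge the elapsed time */
	else
		args->timeout_ns = 0;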
@@ -1123,7 +1013,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct drm_vc4_submit_cl *args = data;
 	struct drm_syncobj *out_sync = NULL;
 	struct vc4_exec_info *exec;
-	struct ww_acquire_ctx acquire_ctx;
+	struct drm_exec exec_ctx;
 	struct dma_fence *in_fence;
 	int ret = 0;
@@ -1216,7 +1106,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
-	ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
+	ret = vc4_lock_bo_reservations(exec, &exec_ctx);
 	if (ret)
 		goto fail;
 
@@ -1224,7 +1114,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		out_sync = drm_syncobj_find(file_priv, args->out_sync);
 		if (!out_sync) {
 			ret = -EINVAL;
-			goto fail;
+			goto fail_unreserve;
 		}
 
 		/* We replace the fence in out_sync in vc4_queue_submit since
@@ -1239,7 +1129,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	 */
 	exec->args = NULL;
 
-	ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
+	ret = vc4_queue_submit(dev, exec, &exec_ctx, out_sync);
 
 	/* The syncobj isn't part of the exec data and we need to free our
 	 * reference even if job submission failed.
@@ -1248,13 +1138,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 		drm_syncobj_put(out_sync);
 
 	if (ret)
-		goto fail;
+		goto fail_unreserve;
 
 	/* Return the seqno for our job. */
 	args->seqno = vc4->emit_seqno;
 
 	return 0;
 
+fail_unreserve:
+	drm_exec_fini(&exec_ctx);
 fail:
 	vc4_complete_exec(&vc4->base, exec);
 
@@ -1275,7 +1167,6 @@ int vc4_gem_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&vc4->bin_job_list);
 	INIT_LIST_HEAD(&vc4->render_job_list);
 	INIT_LIST_HEAD(&vc4->job_done_list);
-	INIT_LIST_HEAD(&vc4->seqno_cb_list);
 	spin_lock_init(&vc4->job_lock);
 
 	INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
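Note on the vc4_gem.c conversion above, which is the core of the series: the hand-rolled ww_mutex retry loop is replaced by a drm_exec context, and per-BO read/write seqnos are replaced by fences attached to each BO's reservation object. A condensed sketch of the drm_exec idiom, assuming an array of struct drm_gem_object pointers (lock_bos is an illustrative name, not vc4's):

	#include <drm/drm_exec.h>

	/* Sketch: lock 'count' GEM objects and reserve one fence slot on
	 * each. drm_exec_until_all_locked() transparently handles the
	 * -EDEADLK backoff that the old open-coded retry: label did by
	 * hand. */
	static int lock_bos(struct drm_exec *exec_ctx,
			    struct drm_gem_object **bos, unsigned int count)
	{
		int ret;

		drm_exec_init(exec_ctx, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
		drm_exec_until_all_locked(exec_ctx) {
			ret = drm_exec_prepare_array(exec_ctx, bos, count, 1);
		}
		if (ret)
			drm_exec_fini(exec_ctx);

		return ret;
	}

On success the caller attaches fences with dma_resv_add_fence() and finally drops every lock with drm_exec_fini(), which is exactly what vc4_attach_fences() and the fail_unreserve label do in the hunks above.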
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 47d9ada98430..37238a12baa5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -270,34 +270,6 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
 static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
 #endif
 
-static int reset_pipe(struct drm_crtc *crtc,
-		      struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_atomic_state *state;
-	struct drm_crtc_state *crtc_state;
-	int ret;
-
-	state = drm_atomic_state_alloc(crtc->dev);
-	if (!state)
-		return -ENOMEM;
-
-	state->acquire_ctx = ctx;
-
-	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state)) {
-		ret = PTR_ERR(crtc_state);
-		goto out;
-	}
-
-	crtc_state->connectors_changed = true;
-
-	ret = drm_atomic_commit(state);
-out:
-	drm_atomic_state_put(state);
-
-	return ret;
-}
-
 static int vc4_hdmi_reset_link(struct drm_connector *connector,
 			       struct drm_modeset_acquire_ctx *ctx)
 {
@@ -376,7 +348,7 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector,
 	 * would be perfectly happy if were to just reconfigure
 	 * the SCDC settings on the fly.
 	 */
-	return reset_pipe(crtc, ctx);
+	return drm_atomic_helper_reset_crtc(crtc, ctx);
 }
 
 static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
@@ -2954,15 +2926,16 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
 	struct resource *res;
 	int ret;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi");
-	if (!res)
-		return -ENODEV;
-
-	vc4_hdmi->hdmicore_regs = devm_ioremap(dev, res->start,
-					       resource_size(res));
-	if (!vc4_hdmi->hdmicore_regs)
-		return -ENOMEM;
+	vc4_hdmi->hdmicore_regs = devm_platform_ioremap_resource_byname(pdev,
+									"hdmi");
+	if (IS_ERR(vc4_hdmi->hdmicore_regs))
+		return PTR_ERR(vc4_hdmi->hdmicore_regs);
 
+	/* This is shared between both HDMI controllers. Cannot
+	 * claim for both instances. Lets not convert to using
+	 * devm_platform_ioremap_resource_byname() like
+	 * the rest
+	 */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hd");
 	if (!res)
 		return -ENODEV;
@@ -2971,53 +2944,35 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
 	if (!vc4_hdmi->hd_regs)
 		return -ENOMEM;
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cec");
-	if (!res)
-		return -ENODEV;
-
-	vc4_hdmi->cec_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->cec_regs)
-		return -ENOMEM;
+	vc4_hdmi->cec_regs = devm_platform_ioremap_resource_byname(pdev,
+								   "cec");
+	if (IS_ERR(vc4_hdmi->cec_regs))
+		return PTR_ERR(vc4_hdmi->cec_regs);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csc");
-	if (!res)
-		return -ENODEV;
+	vc4_hdmi->csc_regs = devm_platform_ioremap_resource_byname(pdev,
+								   "csc");
+	if (IS_ERR(vc4_hdmi->csc_regs))
+		return PTR_ERR(vc4_hdmi->csc_regs);
 
-	vc4_hdmi->csc_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->csc_regs)
-		return -ENOMEM;
+	vc4_hdmi->dvp_regs = devm_platform_ioremap_resource_byname(pdev,
+								   "dvp");
+	if (IS_ERR(vc4_hdmi->dvp_regs))
+		return PTR_ERR(vc4_hdmi->dvp_regs);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvp");
-	if (!res)
-		return -ENODEV;
+	vc4_hdmi->phy_regs = devm_platform_ioremap_resource_byname(pdev,
+								   "phy");
 
-	vc4_hdmi->dvp_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->dvp_regs)
-		return -ENOMEM;
+	if (IS_ERR(vc4_hdmi->phy_regs))
+		return PTR_ERR(vc4_hdmi->phy_regs);
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
-	if (!res)
-		return -ENODEV;
+	vc4_hdmi->ram_regs = devm_platform_ioremap_resource_byname(pdev,
+								   "packet");
+	if (IS_ERR(vc4_hdmi->ram_regs))
+		return PTR_ERR(vc4_hdmi->ram_regs);
 
-	vc4_hdmi->phy_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->phy_regs)
-		return -ENOMEM;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "packet");
-	if (!res)
-		return -ENODEV;
-
-	vc4_hdmi->ram_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->ram_regs)
-		return -ENOMEM;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rm");
-	if (!res)
-		return -ENODEV;
-
-	vc4_hdmi->rm_regs = devm_ioremap(dev, res->start, resource_size(res));
-	if (!vc4_hdmi->rm_regs)
-		return -ENOMEM;
+	vc4_hdmi->rm_regs = devm_platform_ioremap_resource_byname(pdev, "rm");
+	if (IS_ERR(vc4_hdmi->rm_regs))
+		return PTR_ERR(vc4_hdmi->rm_regs);
 
 	vc4_hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
 	if (IS_ERR(vc4_hdmi->hsm_clock)) {
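Note on the vc5_hdmi_init_resources() hunks above: each get-resource-then-ioremap pair collapses into a single helper call, and the error handling changes with it, because devm_ioremap() returns NULL on failure while devm_platform_ioremap_resource_byname() returns an ERR_PTR. A minimal sketch (hypothetical my_regs field and "foo" region name):

	/* Before: two calls plus NULL checks. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "foo");
	if (!res)
		return -ENODEV;
	my_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!my_regs)
		return -ENOMEM;

	/* After: one call; errors come back as an ERR_PTR. */
	my_regs = devm_platform_ioremap_resource_byname(pdev, "foo");
	if (IS_ERR(my_regs))
		return PTR_ERR(my_regs);

The "hd" region is intentionally left on the old path: as the added comment says, it is shared between both HDMI controllers, and the new helper also requests the memory region, which would fail for the second instance.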
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d608860d525f..c5e84d3494d2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -2338,7 +2338,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
 }
 
 static int vc4_plane_atomic_async_check(struct drm_plane *plane,
-					struct drm_atomic_state *state)
+					struct drm_atomic_state *state, bool flip)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 4eab069cda75..42acac05fe47 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -196,7 +196,7 @@ static int vc4_txp_connector_get_modes(struct drm_connector *connector)
 
 static enum drm_mode_status
 vc4_txp_connector_mode_valid(struct drm_connector *connector,
-			     struct drm_display_mode *mode)
+			     const struct drm_display_mode *mode)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 5bf134968ade..1e7bdda55698 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -284,9 +284,6 @@ validate_indexed_prim_list(VALIDATE_ARGS)
 	if (!ib)
 		return -EINVAL;
 
-	exec->bin_dep_seqno = max(exec->bin_dep_seqno,
-				  to_vc4_bo(&ib->base)->write_seqno);
-
 	if (offset > ib->base.size ||
 	    (ib->base.size - offset) / index_size < length) {
 		DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
@@ -738,11 +735,6 @@ reloc_tex(struct vc4_exec_info *exec,
 
 	*validated_p0 = tex->dma_addr + p0;
 
-	if (is_cs) {
-		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
-					  to_vc4_bo(&tex->base)->write_seqno);
-	}
-
 	return true;
 fail:
 	DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -904,9 +896,6 @@ validate_gl_shader_rec(struct drm_device *dev,
 		uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
 		uint32_t max_index;
 
-		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
-					  to_vc4_bo(&vbo->base)->write_seqno);
-
 		if (state->addr & 0x8)
 			stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;