Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  78
1 file changed, 56 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9ca741f3a0bc..44fe587aaef9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 }
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -197,19 +211,13 @@ static void drm_sched_job_finish(struct work_struct *work)
 	 * manages to find this job as the next job in the list, the fence
 	 * signaled check below will prevent the timeout to be restarted.
 	 */
-	cancel_delayed_work_sync(&s_job->work_tdr);
+	cancel_delayed_work_sync(&sched->work_tdr);
 
 	spin_lock(&sched->job_list_lock);
-	/* queue TDR for next job */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
-		struct drm_sched_job *next = list_next_entry(s_job, node);
-
-		if (!dma_fence_is_signaled(&next->s_fence->finished))
-			schedule_delayed_work(&next->work_tdr, sched->timeout);
-	}
 	/* remove job from ring_mirror_list */
 	list_del(&s_job->node);
+	/* queue TDR for next job */
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
 	dma_fence_put(&s_job->s_fence->finished);
@@ -233,19 +241,49 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    list_first_entry_or_null(&sched->ring_mirror_list,
-				     struct drm_sched_job, node) == s_job)
-		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
 {
-	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
-						 work_tdr.work);
+	struct drm_gpu_scheduler *sched;
+	struct drm_sched_job *job;
+	int r;
 
-	job->sched->ops->timedout_job(job);
+	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!dma_fence_remove_callback(fence->parent, &fence->cb))
+			goto already_signaled;
+	}
+
+	job = list_first_entry_or_null(&sched->ring_mirror_list,
+				       struct drm_sched_job, node);
+	spin_unlock(&sched->job_list_lock);
+
+	if (job)
+		sched->ops->timedout_job(job);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *fence = job->s_fence;
+
+		if (!fence->parent || !list_empty(&fence->cb.node))
+			continue;
+
+		r = dma_fence_add_callback(fence->parent, &fence->cb,
+					   drm_sched_process_job);
+		if (r)
+			drm_sched_process_job(fence->parent, &fence->cb);
+
+already_signaled:
+		;
+	}
+	spin_unlock(&sched->job_list_lock);
 }
 
 /**
@@ -312,11 +350,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 	int r;
 
 	spin_lock(&sched->job_list_lock);
-	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-					 struct drm_sched_job, node);
-	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
-
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
@@ -349,6 +382,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		}
 		spin_lock(&sched->job_list_lock);
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -384,7 +418,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
 
 	INIT_WORK(&job->finish_work, drm_sched_job_finish);
 	INIT_LIST_HEAD(&job->node);
-	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
 
 	return 0;
 }
@@ -575,6 +608,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
 	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
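
For reference, the net effect of the patch: the TDR (timeout detect and recovery) delayed work now lives once per scheduler (sched->work_tdr) rather than once per job, and drm_sched_start_timeout() only (re)arms it while ring_mirror_list still holds unfinished jobs. Below is a minimal, self-contained sketch of that pattern as a toy kernel module; the toy_sched/toy_job names, the demo job and the module scaffolding are invented for illustration and are not part of the DRM scheduler API.

/* Hypothetical illustration of the per-scheduler TDR pattern; not DRM code. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */
#include <linux/jiffies.h>

/* Stand-in for struct drm_gpu_scheduler: one timeout worker per scheduler. */
struct toy_sched {
	struct delayed_work work_tdr;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	long timeout;
};

/* Stand-in for struct drm_sched_job. */
struct toy_job {
	struct list_head node;
};

/* Same idea as drm_sched_start_timeout(): arm the worker only while jobs are pending. */
static void toy_sched_start_timeout(struct toy_sched *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->ring_mirror_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

static void toy_sched_timedout(struct work_struct *work)
{
	struct toy_sched *sched = container_of(work, struct toy_sched, work_tdr.work);
	struct toy_job *job;

	spin_lock(&sched->job_list_lock);
	/* The oldest entry on the mirror list is the job that timed out. */
	job = list_first_entry_or_null(&sched->ring_mirror_list, struct toy_job, node);
	spin_unlock(&sched->job_list_lock);

	if (job)
		pr_warn("toy_sched: job %p timed out\n", job);
}

static void toy_job_begin(struct toy_sched *sched, struct toy_job *job)
{
	spin_lock(&sched->job_list_lock);
	list_add_tail(&job->node, &sched->ring_mirror_list);
	toy_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void toy_job_finish(struct toy_sched *sched, struct toy_job *job)
{
	/* Stop the pending timeout, drop the finished job, re-arm for the next one. */
	cancel_delayed_work_sync(&sched->work_tdr);

	spin_lock(&sched->job_list_lock);
	list_del(&job->node);
	toy_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static struct toy_sched toy;
static struct toy_job demo_job;

static int __init toy_init(void)
{
	INIT_LIST_HEAD(&toy.ring_mirror_list);
	spin_lock_init(&toy.job_list_lock);
	INIT_DELAYED_WORK(&toy.work_tdr, toy_sched_timedout);
	toy.timeout = msecs_to_jiffies(500);

	toy_job_begin(&toy, &demo_job);		/* pretend a job was pushed to the hw ring */
	return 0;
}

static void __exit toy_exit(void)
{
	toy_job_finish(&toy, &demo_job);	/* pretend its hardware fence signaled */
	cancel_delayed_work_sync(&toy.work_tdr);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

The point of the per-scheduler worker shows in toy_job_finish(): instead of one timer per job, the single delayed work is cancelled and re-armed, so the oldest still-pending job always has a live timeout.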