author     Oscar Mateo <oscar.mateo@intel.com>       2014-07-24 17:04:23 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2014-08-11 16:55:17 +0200
commit     48d823878d64f93163f5a949623346748bbce1b4 (patch)
tree       15aea93bd4927c689309120f720284f1b88b5e19 /drivers
parent     454afebde873874b939465bfc1a294ac3697c96e (diff)
drm/i915/bdw: Generic logical ring init and cleanup
Allocate and populate the default LRC for every ring, call gen-specific
init/cleanup, init/fini the command parser and set the status page (now
inside the LRC object). These are things all engines/rings have in common.

Stopping the ring before cleanup and initializing the seqnos is left as a
TODO task (we need more infrastructure in place before we can achieve this).

v2: Check the ringbuffer backing obj for ring_is_initialized, instead of
the context backing obj (similar, but not exactly the same).

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
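The pattern this patch establishes, generic per-engine setup followed by an
optional gen-specific ->init()/->cleanup() hook, can be illustrated with a
much simplified, self-contained userspace sketch. Everything below
(fake_engine, fake_logical_ring_init, rcs_init, and so on) is an invented
stand-in for illustration only, not the real i915 types; the ordering loosely
follows the intel_lrc.c hunk in the diff below.

/*
 * Simplified, userspace-only model of the init/cleanup split:
 * generic work (status page, bookkeeping) is done for every engine,
 * then an optional gen-specific hook runs.  All names are stand-ins.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_engine {
	const char *name;
	void *status_page;			/* models ring->status_page.page_addr */
	int (*init)(struct fake_engine *e);	/* gen-specific init, may be NULL */
	void (*cleanup)(struct fake_engine *e);	/* gen-specific cleanup, may be NULL */
};

/* Generic part shared by all engines. */
static int fake_logical_ring_init(struct fake_engine *e)
{
	int ret;

	e->status_page = malloc(4096);	/* stands in for kmap() of the LRC page */
	if (!e->status_page)
		return -ENOMEM;

	if (e->init) {			/* gen-specific hook runs last */
		ret = e->init(e);
		if (ret) {
			free(e->status_page);
			e->status_page = NULL;
			return ret;
		}
	}
	return 0;
}

/* Mirror image of the init path. */
static void fake_logical_ring_cleanup(struct fake_engine *e)
{
	if (!e->status_page)		/* "not initialized": nothing to do */
		return;

	if (e->cleanup)
		e->cleanup(e);

	free(e->status_page);
	e->status_page = NULL;
}

static int rcs_init(struct fake_engine *e)
{
	printf("%s: gen-specific init\n", e->name);
	return 0;
}

int main(void)
{
	struct fake_engine rcs = { .name = "rcs", .init = rcs_init };

	if (fake_logical_ring_init(&rcs))
		return 1;
	fake_logical_ring_cleanup(&rcs);
	return 0;
}

The point of the split is the same as in the patch: the common steps run for
every engine, while anything generation-specific stays behind the optional
function pointers.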
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c        | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h |  6
4 files changed, 70 insertions, 11 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bcb41002aa13..7a08f3e9e1ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -445,10 +445,6 @@ int i915_gem_context_init(struct drm_device *dev)
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
-
- /* FIXME: we really only want to do this for initialized rings */
- if (i915.enable_execlists)
- intel_lr_context_deferred_create(ctx, ring);
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9c2ff8f11c90..ed7a4ff3bbd2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -110,12 +110,60 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
- /* TODO */
+ if (!intel_ring_initialized(ring))
+ return;
+
+ /* TODO: make sure the ring is stopped */
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ if (ring->status_page.obj) {
+ kunmap(sg_page(ring->status_page.obj->pages->sgl));
+ ring->status_page.obj = NULL;
+ }
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
- /* TODO */
+ int ret;
+ struct intel_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *dctx_obj;
+
+ /* Intentionally left blank. */
+ ring->buffer = NULL;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ init_waitqueue_head(&ring->irq_queue);
+
+ ret = intel_lr_context_deferred_create(dctx, ring);
+ if (ret)
+ return ret;
+
+ /* The status page is offset 0 from the context object in LRCs. */
+ dctx_obj = dctx->engine[ring->id].state;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
+ ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
+ if (ring->status_page.page_addr == NULL)
+ return -ENOMEM;
+ ring->status_page.obj = dctx_obj;
+
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ return ret;
+
+ if (ring->init) {
+ ret = ring->init(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -399,6 +447,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
+ if (ctx->engine[ring->id].state)
+ return 0;
context_size = round_up(get_lr_context_size(ring), 4096);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c35f956ed6a0..e4b97f5c5797 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -40,6 +40,23 @@
*/
#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev)
+ return false;
+
+ if (i915.enable_execlists) {
+ struct intel_context *dctx = ring->default_context;
+ struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+ return ringbuf->obj;
+ } else
+ return ring->buffer && ring->buffer->obj;
+}
+
static inline int __ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe9d9d9d3598..fbe54ef6a9a1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -289,11 +289,7 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
- return ring->buffer && ring->buffer->obj;
-}
+bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)