path: root/drivers/gpu/drm/i915/i915_scheduler.h
author     Chris Wilson <chris@chris-wilson.co.uk>    2019-02-28 10:20:33 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>    2019-02-28 11:07:56 +0000
commit     32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e (patch)
tree       b8d37fcf09299b0f0edd9317479336e04a921156 /drivers/gpu/drm/i915/i915_scheduler.h
parent     bd2be1418659abd7b1cdecc7d23d86314b0e3496 (diff)
download   linux-stable-32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e.tar.gz
           linux-stable-32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e.tar.bz2
           linux-stable-32eb6bcfdda9dad240cf6a22fda2b3418b1a1b8e.zip
drm/i915: Make request allocation caches global
As kmem_caches share the same properties (size, allocation/free behaviour) for all potential devices, we can use global caches. While this potentially has worse fragmentation behaviour (one can argue that different devices would have different activity lifetimes, but you can also argue that activity is temporal across the system), it is the default behaviour of the system at large to amalgamate matching caches.

The benefit for us is much reduced pointer dancing along the frequent allocation paths.

v2: Defer shrinking until after a global grace period, to futureproof multiple consumers of the slab caches, similar to the current strategy for avoiding shrinking too early.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190228102035.5857-1-chris@chris-wilson.co.uk
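The v2 note is the key detail: the shared slabs are only trimmed once a full RCU grace period has elapsed, so any still in-flight users of the caches are not disturbed. A minimal, self-contained sketch of that pattern (illustrative names such as demo_request and demo_global_shrink; not the actual i915 implementation) looks roughly like this:

	/*
	 * Sketch only: one kmem_cache shared by every device instance,
	 * with shrinking deferred behind an RCU grace period before the
	 * slab is trimmed.
	 */
	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct demo_request {			/* stand-in for the cached object */
		struct rcu_head rcu;		/* users may free via call_rcu() */
		unsigned long payload;
	};

	static struct kmem_cache *demo_request_slab;

	int demo_global_init(void)
	{
		demo_request_slab = KMEM_CACHE(demo_request, SLAB_HWCACHE_ALIGN);
		return demo_request_slab ? 0 : -ENOMEM;
	}

	void demo_global_shrink(void)
	{
		synchronize_rcu();		/* wait for a full grace period */
		kmem_cache_shrink(demo_request_slab);
	}

	void demo_global_exit(void)
	{
		rcu_barrier();			/* flush pending call_rcu() frees */
		kmem_cache_destroy(demo_request_slab);
	}

In the patch itself, the corresponding entry points for the scheduler's caches are the i915_global_scheduler_init()/_shrink()/_exit() declarations added at the end of the header below.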
Diffstat (limited to 'drivers/gpu/drm/i915/i915_scheduler.h')
-rw-r--r--   drivers/gpu/drm/i915/i915_scheduler.h | 34
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index dbe9cb7ecd82..4153c6a607b0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -83,6 +83,23 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
+struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
+ struct rb_node node;
+ unsigned long used;
+ int priority;
+};
+
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
+
void i915_sched_node_init(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -90,12 +107,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_dependency *dep,
unsigned long flags);
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -105,4 +120,15 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+void __i915_priolist_free(struct i915_priolist *p);
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+ if (p->priority != I915_PRIORITY_NORMAL)
+ __i915_priolist_free(p);
+}
+
+int i915_global_scheduler_init(void);
+void i915_global_scheduler_shrink(void);
+void i915_global_scheduler_exit(void);
+
#endif /* _I915_SCHEDULER_H_ */
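For context on the new iterator: priolist_for_each_request_consume() is driven by the p->used bitmask. ffs() picks the lowest set bit, the matching per-priority list is walked with the _safe list iterator, and the bit is cleared once that list has been drained, so the node is consumed without rescanning empty slots. A simplified consumer loop in the spirit of the execlists dequeue path might look like the sketch below (illustrative only: demo_dequeue is a made-up name, and it assumes the engine keeps its priolists in an rb_root_cached queue served leftmost-first, as the execlists backend arranges them):

	/* Assumes the i915 driver-internal headers and types are available. */
	static void demo_dequeue(struct intel_engine_cs *engine)
	{
		struct intel_engine_execlists *execlists = &engine->execlists;
		struct rb_node *rb;

		while ((rb = rb_first_cached(&execlists->queue))) {
			struct i915_priolist *p =
				rb_entry(rb, struct i915_priolist, node);
			struct i915_request *rq, *rn;
			int i;

			/*
			 * Drain every per-priority list in this node; the
			 * macro clears each bit of p->used as the matching
			 * list is consumed.
			 */
			priolist_for_each_request_consume(rq, rn, p, i) {
				list_del_init(&rq->sched.link);
				/* hand rq to the submission backend here */
			}

			rb_erase_cached(&p->node, &execlists->queue);
			/* the NORMAL priolist is special-cased by the inline above */
			i915_priolist_free(p);
		}
	}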