Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 56
1 file changed, 35 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 79209ee25c21..735f695cb889 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
@@ -85,8 +86,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180606"
-#define DRIVER_TIMESTAMP 1528323047
+#define DRIVER_DATE "20180620"
+#define DRIVER_TIMESTAMP 1529529048
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -107,13 +108,24 @@
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
#else
+
#define i915_inject_load_failure() false
+#define i915_error_injected() false
+
#endif
+#define i915_load_error(i915, fmt, ...) \
+ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
+
typedef struct {
uint32_t val;
} uint_fixed_16_16_t;
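
The new i915_error_injected()/i915_load_error() pair lets messages caused by a deliberately injected load failure be demoted from KERN_ERR to KERN_DEBUG. A minimal sketch of how a caller might pair the two macros follows; the function itself is hypothetical, only the macros declared above come from this header.

/* Hypothetical caller showing the intended pairing: when the failure was
 * injected for testing, i915_error_injected() is true and the message is
 * logged at KERN_DEBUG instead of KERN_ERR.
 */
static int example_init_hw(struct drm_i915_private *i915)
{
	int err = 0;

	/* No-op (false) unless CONFIG_DRM_I915_DEBUG fault injection is armed. */
	if (i915_inject_load_failure())
		err = -ENODEV;

	if (err)
		i915_load_error(i915, "hw init failed (%d)\n", err);

	return err;
}
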
@@ -340,14 +352,21 @@ struct drm_i915_file_private {
unsigned int bsd_engine;
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments per client ban score. Also
+ * hangs in short succession increments ban score. If ban threshold
+ * is reached, client is considered banned and submitting more work
+ * will fail. This is a stop gap measure to limit the badly behaving
+ * clients access to gpu. Note that unbannable contexts never increment
+ * the client ban score.
*/
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
- atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
};
/* Interface history:
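
The constants above replace the simple per-client ban counter with an accumulated score: context bans and hangs in quick succession both add to it, and the client is considered banned once the threshold is reached. Purely as an illustration of that scheme (the real accounting lives in the GPU hang handling code, not in this header):

/* Illustrative sketch, not the in-tree implementation: a context ban adds
 * I915_CLIENT_SCORE_CONTEXT_BAN, a hang within I915_CLIENT_FAST_HANG_JIFFIES
 * of the previous one adds I915_CLIENT_SCORE_HANG_FAST, and the client is
 * treated as banned once the total reaches I915_CLIENT_SCORE_BANNED.
 */
static bool example_client_banned(struct drm_i915_file_private *file_priv,
				  bool context_banned)
{
	unsigned long prev = file_priv->hang_timestamp;
	unsigned int score = 0;

	if (context_banned)
		score += I915_CLIENT_SCORE_CONTEXT_BAN;

	if (time_before(jiffies, prev + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	file_priv->hang_timestamp = jiffies;

	return atomic_add_return(score, &file_priv->ban_score) >=
	       I915_CLIENT_SCORE_BANNED;
}
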
@@ -601,7 +620,7 @@ struct i915_psr {
bool sink_support;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
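
Switching i915_psr.work from struct delayed_work to struct work_struct means the PSR work is queued for immediate execution rather than after a delay. A sketch of that API difference only; the function below is a placeholder, not driver code:

static void example_schedule_psr_work(struct i915_psr *psr)
{
	/* Before: struct delayed_work, queued with a delay, e.g.
	 *   schedule_delayed_work(&psr->work, msecs_to_jiffies(100));
	 * After: struct work_struct, queued to run as soon as possible.
	 */
	schedule_work(&psr->work);
}
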
@@ -631,7 +650,7 @@ enum intel_pch {
PCH_KBP, /* Kaby Lake PCH */
PCH_CNP, /* Cannon Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP,
+ PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -994,6 +1013,8 @@ struct i915_gem_mm {
#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+
enum modeset_restore {
MODESET_ON_LID_OPEN,
MODESET_DONE,
@@ -1004,6 +1025,7 @@ enum modeset_restore {
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
#define DDC_PIN_B 0x05
@@ -1290,7 +1312,7 @@ struct i915_frontbuffer_tracking {
};
struct i915_wa_reg {
- i915_reg_t addr;
+ u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
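
For the mask/value pair stored in struct i915_wa_reg, a typical read-modify-write application looks roughly like the sketch below; this is illustrative only, not the driver's actual workaround emission path.

static u32 example_apply_wa(u32 old, const struct i915_wa_reg *wa)
{
	/* Bits selected by 'mask' come from 'value'; all other bits keep
	 * their existing contents.
	 */
	return (old & ~wa->mask) | (wa->value & wa->mask);
}
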
@@ -3174,7 +3196,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
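
The i915_gem_fault() prototype change is part of the kernel-wide move to the dedicated vm_fault_t return type for fault handlers. A rough sketch of the resulting handler shape, assuming a hypothetical page-insertion helper (vm_fault_t, vmf_error() and VM_FAULT_* come from linux/mm.h):

/* example_insert_pages() is hypothetical; it stands in for the object
 * pinning and PTE insertion done by the real handler.
 */
static int example_insert_pages(struct vm_fault *vmf)
{
	return 0;
}

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	int err;

	err = example_insert_pages(vmf);
	if (err)
		return vmf_error(err);	/* errno -> VM_FAULT_*, e.g. -ENOMEM -> VM_FAULT_OOM */

	return VM_FAULT_NOPAGE;
}
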
@@ -3678,14 +3700,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
- unsigned long j = timespec_to_jiffies(value);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of