Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |   1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c       |  19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c    |  44
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h    |   8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c      | 107
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h      |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |   4
7 files changed, 82 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 78b75ee3c931..c894a48a74a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d2d93959b119..723fd763da8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -465,33 +465,34 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
static int vmw_fb_kms_framebuffer(struct fb_info *info)
{
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct vmw_fb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
struct drm_framebuffer *cur_fb;
struct vmw_framebuffer *vfb;
- int ret = 0;
+ int ret = 0, depth;
size_t new_bo_size;
- ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+ ret = vmw_fb_compute_depth(var, &depth);
if (ret)
return ret;
mode_cmd.width = var->xres;
mode_cmd.height = var->yres;
- mode_cmd.bpp = var->bits_per_pixel;
- mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+ mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
+ mode_cmd.pixel_format =
+ drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
cur_fb = par->set_fb;
if (cur_fb && cur_fb->width == mode_cmd.width &&
cur_fb->height == mode_cmd.height &&
- cur_fb->bits_per_pixel == mode_cmd.bpp &&
- cur_fb->depth == mode_cmd.depth &&
- cur_fb->pitches[0] == mode_cmd.pitch)
+ cur_fb->pixel_format == mode_cmd.pixel_format &&
+ cur_fb->pitches[0] == mode_cmd.pitches[0])
return 0;
/* Need new buffer object ? */
- new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+ new_bo_size = (size_t) mode_cmd.pitches[0] * (size_t) mode_cmd.height;
ret = vmw_fb_kms_detach(par,
par->bo_size < new_bo_size ||
par->bo_size > 2*new_bo_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 26ac8e80a478..6541dd8b82dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -108,7 +108,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
* objects with actions attached to them.
*/
-static void vmw_fence_obj_destroy(struct fence *f)
+static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -123,17 +123,17 @@ static void vmw_fence_obj_destroy(struct fence *f)
fence->destroy(fence);
}
-static const char *vmw_fence_get_driver_name(struct fence *f)
+static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
return "vmwgfx";
}
-static const char *vmw_fence_get_timeline_name(struct fence *f)
+static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
return "svga";
}
-static bool vmw_fence_enable_signaling(struct fence *f)
+static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -152,12 +152,12 @@ static bool vmw_fence_enable_signaling(struct fence *f)
}
struct vmwgfx_wait_cb {
- struct fence_cb base;
+ struct dma_fence_cb base;
struct task_struct *task;
};
static void
-vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct vmwgfx_wait_cb *wait =
container_of(cb, struct vmwgfx_wait_cb, base);
@@ -167,7 +167,7 @@ vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
static void __vmw_fences_update(struct vmw_fence_manager *fman);
-static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
@@ -197,7 +197,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
while (ret > 0) {
__vmw_fences_update(fman);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
break;
if (intr)
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct fence_ops vmw_fence_ops = {
+static struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
@@ -298,7 +298,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->event_fence_action_size =
ttm_round_pot(sizeof(struct vmw_event_fence_action));
mutex_init(&fman->goal_irq_mutex);
- fman->ctx = fence_context_alloc(1);
+ fman->ctx = dma_fence_context_alloc(1);
return fman;
}
@@ -326,8 +326,8 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
unsigned long irq_flags;
int ret = 0;
- fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
- fman->ctx, seqno);
+ dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+ fman->ctx, seqno);
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
@@ -431,7 +431,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
u32 goal_seqno;
u32 *fifo_mem;
- if (fence_is_signaled_locked(&fence->base))
+ if (dma_fence_is_signaled_locked(&fence->base))
return false;
fifo_mem = fman->dev_priv->mmio_virt;
@@ -459,7 +459,7 @@ rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
- fence_signal_locked(&fence->base);
+ dma_fence_signal_locked(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -500,18 +500,18 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return 1;
vmw_fences_update(fman);
- return fence_is_signaled(&fence->base);
+ return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
bool interruptible, unsigned long timeout)
{
- long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
+ long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
if (likely(ret > 0))
return 0;
@@ -530,7 +530,7 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- fence_free(&fence->base);
+ dma_fence_free(&fence->base);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
@@ -669,7 +669,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
spin_unlock_irq(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
@@ -677,7 +677,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
- fence_signal(&fence->base);
+ dma_fence_signal(&fence->base);
INIT_LIST_HEAD(&action_list);
list_splice_init(&fence->seq_passed_actions,
&action_list);
@@ -685,7 +685,7 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
}
BUG_ON(!list_empty(&fence->head));
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
spin_lock_irq(&fman->lock);
}
spin_unlock_irq(&fman->lock);
@@ -884,7 +884,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
spin_lock_irqsave(&fman->lock, irq_flags);
fman->pending_actions[action->type]++;
- if (fence_is_signaled_locked(&fence->base)) {
+ if (dma_fence_is_signaled_locked(&fence->base)) {
struct list_head action_list;
INIT_LIST_HEAD(&action_list);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 83ae301ee141..d9d85aa6ed20 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -27,7 +27,7 @@
#ifndef _VMWGFX_FENCE_H_
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -52,7 +52,7 @@ struct vmw_fence_action {
};
struct vmw_fence_obj {
- struct fence base;
+ struct dma_fence base;
struct list_head head;
struct list_head seq_passed_actions;
@@ -71,14 +71,14 @@ vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
*fence_p = NULL;
if (fence)
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
}
static inline struct vmw_fence_obj *
vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
if (fence)
- fence_get(&fence->base);
+ dma_fence_get(&fence->base);
return fence;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bf28ccc150df..e7daf59bac80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -516,7 +516,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd,
bool is_dmabuf_proxy)
@@ -525,6 +525,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
+ struct drm_format_name_buf format_name;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
@@ -548,21 +549,22 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return -EINVAL;
}
- switch (mode_cmd->depth) {
- case 32:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
format = SVGA3D_A8R8G8B8;
break;
- case 24:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
break;
- case 16:
+ case DRM_FORMAT_RGB565:
format = SVGA3D_R5G6B5;
break;
- case 15:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_A1R5G5B5;
break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
@@ -581,14 +583,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
goto out_err1;
}
- /* XXX get the first 3 from the surface info */
- vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitches[0] = mode_cmd->pitch;
- vfbs->base.base.depth = mode_cmd->depth;
- vfbs->base.base.width = mode_cmd->width;
- vfbs->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbs->base.base, mode_cmd);
vfbs->surface = vmw_surface_reference(surface);
- vfbs->base.user_handle = mode_cmd->handle;
+ vfbs->base.user_handle = mode_cmd->handles[0];
vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
@@ -755,7 +752,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
* 0 on success, error code otherwise
*/
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd *mode_cmd,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
struct vmw_dma_buffer *dmabuf_mob,
struct vmw_surface **srf_out)
{
@@ -763,17 +760,18 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
struct drm_vmw_size content_base_size;
struct vmw_resource *res;
unsigned int bytes_pp;
+ struct drm_format_name_buf format_name;
int ret;
- switch (mode_cmd->depth) {
- case 32:
- case 24:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
format = SVGA3D_X8R8G8B8;
bytes_pp = 4;
break;
- case 16:
- case 15:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
format = SVGA3D_R5G6B5;
bytes_pp = 2;
break;
@@ -784,11 +782,12 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
break;
default:
- DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid framebuffer format %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
- content_base_size.width = mode_cmd->pitch / bytes_pp;
+ content_base_size.width = mode_cmd->pitches[0] / bytes_pp;
content_base_size.height = mode_cmd->height;
content_base_size.depth = 1;
@@ -826,16 +825,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
- const struct drm_mode_fb_cmd
+ const struct drm_mode_fb_cmd2
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
+ struct drm_format_name_buf format_name;
int ret;
- requested_size = mode_cmd->height * mode_cmd->pitch;
+ requested_size = mode_cmd->height * mode_cmd->pitches[0];
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
@@ -844,27 +844,16 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->active_display_unit == vmw_du_screen_object) {
- switch (mode_cmd->depth) {
- case 32:
- case 24:
- /* Only support 32 bpp for 32 and 24 depth fbs */
- if (mode_cmd->bpp == 32)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
- case 16:
- case 15:
- /* Only support 16 bpp for 16 and 15 depth fbs */
- if (mode_cmd->bpp == 16)
- break;
-
- DRM_ERROR("Invalid color depth/bbp: %d %d\n",
- mode_cmd->depth, mode_cmd->bpp);
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_RGB565:
+ break;
default:
- DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+ DRM_ERROR("Invalid pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
return -EINVAL;
}
}
@@ -875,14 +864,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
goto out_err1;
}
- vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitches[0] = mode_cmd->pitch;
- vfbd->base.base.depth = mode_cmd->depth;
- vfbd->base.base.width = mode_cmd->width;
- vfbd->base.base.height = mode_cmd->height;
+ drm_helper_mode_fill_fb_struct(&vfbd->base.base, mode_cmd);
vfbd->base.dmabuf = true;
vfbd->buffer = vmw_dmabuf_reference(dmabuf);
- vfbd->base.user_handle = mode_cmd->handle;
+ vfbd->base.user_handle = mode_cmd->handles[0];
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
@@ -916,7 +901,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
bool is_dmabuf_proxy = false;
@@ -971,7 +956,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd2)
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -979,16 +964,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
- struct drm_mode_fb_cmd mode_cmd;
int ret;
- mode_cmd.width = mode_cmd2->width;
- mode_cmd.height = mode_cmd2->height;
- mode_cmd.pitch = mode_cmd2->pitches[0];
- mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
-
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
@@ -996,8 +973,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd.pitch,
- mode_cmd.height)) {
+ mode_cmd->pitches[0],
+ mode_cmd->height)) {
DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1011,7 +988,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
@@ -1023,14 +1000,14 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
- mode_cmd.handle,
+ mode_cmd->handles[0],
&surface, &bo);
if (ret)
goto err_out;
vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
!(dev_priv->capabilities & SVGA_CAP_3D),
- &mode_cmd);
+ mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index ff4803c107bc..f42ce9a1c3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -248,7 +248,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_surface *surface,
bool only_2d,
- const struct drm_mode_fb_cmd *mode_cmd);
+ const struct drm_mode_fb_cmd2 *mode_cmd);
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
unsigned unit,
u32 max_width,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 52ca1c9d070e..8e86d6d4141b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
- nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
@@ -1454,7 +1454,7 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
reservation_object_add_excl_fence(bo->resv, &fence->base);
- fence_put(&fence->base);
+ dma_fence_put(&fence->base);
} else
reservation_object_add_excl_fence(bo->resv, &fence->base);
}