27 files changed, 38 insertions(+), 1316 deletions(-)
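The bulk of this diff follows one mechanical pattern: each object allocation in the driver used to be bracketed by a charge against the global TTM accounting state (ttm_mem_global_alloc()), and every error path and destructor had to release that charge again (ttm_mem_global_free()). Below is a small, compilable userspace model of that discipline, included only to make the repeated hunks easier to read. The ttm_mem_global_alloc/ttm_mem_global_free names match the driver; the demo_* helpers, the single flat counter, and the 1 MiB limit are illustrative stand-ins for the per-zone limits, sysfs knobs, and swapout workqueue implemented in the deleted ttm_memory.c.

/*
 * Userspace sketch of the charge/roll-back pattern this commit removes.
 * demo_used stands in for zone->used_mem, demo_limit for zone->max_mem.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t demo_used;                   /* like zone->used_mem */
static const uint64_t demo_limit = 1 << 20;  /* like zone->max_mem */

static int ttm_mem_global_alloc(uint64_t size)
{
	if (demo_used + size > demo_limit)
		return -1;                   /* -ENOMEM in the kernel */
	demo_used += size;
	return 0;
}

static void ttm_mem_global_free(uint64_t size)
{
	demo_used -= size;
}

/* The call-site shape deleted all over this diff: charge first,
 * allocate second, and un-charge on every failure path. */
static void *demo_obj_create(size_t size)
{
	void *obj;

	if (ttm_mem_global_alloc(size))
		return NULL;
	obj = calloc(1, size);
	if (!obj) {
		ttm_mem_global_free(size);   /* roll back the charge */
		return NULL;
	}
	return obj;
}

static void demo_obj_destroy(void *obj, size_t size)
{
	free(obj);
	ttm_mem_global_free(size);           /* destructor un-charges */
}

int main(void)
{
	void *obj = demo_obj_create(128);

	printf("charged: %llu bytes\n", (unsigned long long)demo_used);
	if (obj)
		demo_obj_destroy(obj, 128);
	printf("after destroy: %llu bytes\n", (unsigned long long)demo_used);
	return 0;
}

After the patch, each of these sites collapses to a plain kzalloc()/kvzalloc() with a single -ENOMEM check, which is why nearly every hunk deletes a ttm_operation_ctx, an alloc call, and one or more matching free calls.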
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index e02780648a95..a2c2a5f5d85e 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_system_manager.o + vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c deleted file mode 100644 index 326d2d177c8b..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.c +++ /dev/null @@ -1,586 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/spinlock.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/slab.h> - -#include <drm/drm_device.h> -#include <drm/drm_file.h> -#include <drm/ttm/ttm_device.h> - -#include "ttm_memory.h" - -#define TTM_MEMORY_ALLOC_RETRIES 4 - -struct ttm_mem_global ttm_mem_glob; -EXPORT_SYMBOL(ttm_mem_glob); - -struct ttm_mem_zone { - struct kobject kobj; - struct ttm_mem_global *glob; - const char *name; - uint64_t zone_mem; - uint64_t emer_mem; - uint64_t max_mem; - uint64_t swap_limit; - uint64_t used_mem; -}; - -static struct attribute ttm_mem_sys = { - .name = "zone_memory", - .mode = S_IRUGO -}; -static struct attribute ttm_mem_emer = { - .name = "emergency_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_max = { - .name = "available_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_swap = { - .name = "swap_limit", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_used = { - .name = "used_memory", - .mode = S_IRUGO -}; - -static void ttm_mem_zone_kobj_release(struct kobject *kobj) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - - pr_info("Zone %7s: Used memory at exit: %llu KiB\n", - zone->name, (unsigned long long)zone->used_mem >> 10); - kfree(zone); -} - -static ssize_t ttm_mem_zone_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - uint64_t val = 0; - - spin_lock(&zone->glob->lock); - if (attr == &ttm_mem_sys) - val = zone->zone_mem; - else if (attr == &ttm_mem_emer) - val = zone->emer_mem; - else if (attr == &ttm_mem_max) - val = zone->max_mem; - else if (attr == &ttm_mem_swap) - val = zone->swap_limit; - else if (attr == &ttm_mem_used) - val = zone->used_mem; - spin_unlock(&zone->glob->lock); - - return snprintf(buffer, PAGE_SIZE, "%llu\n", - (unsigned long long) val >> 10); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob); - -static ssize_t ttm_mem_zone_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t size) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - int chars; - unsigned long val; - uint64_t val64; - - chars = sscanf(buffer, "%lu", &val); - if (chars == 0) - return size; - - val64 = val; - val64 <<= 10; - - spin_lock(&zone->glob->lock); - if (val64 > zone->zone_mem) - val64 = zone->zone_mem; - if (attr == &ttm_mem_emer) { - zone->emer_mem = val64; - if (zone->max_mem > val64) - zone->max_mem = val64; - } else if (attr == &ttm_mem_max) { - zone->max_mem = val64; - if (zone->emer_mem < val64) - zone->emer_mem = val64; - } else if (attr == &ttm_mem_swap) - zone->swap_limit = val64; - spin_unlock(&zone->glob->lock); - - ttm_check_swapping(zone->glob); - - return size; -} - -static struct attribute *ttm_mem_zone_attrs[] = { - &ttm_mem_sys, - &ttm_mem_emer, - &ttm_mem_max, - &ttm_mem_swap, - &ttm_mem_used, - NULL -}; - -static const struct sysfs_ops ttm_mem_zone_ops = { - .show = &ttm_mem_zone_show, - .store = &ttm_mem_zone_store -}; - -static struct kobj_type ttm_mem_zone_kobj_type = { - .release = &ttm_mem_zone_kobj_release, - .sysfs_ops = &ttm_mem_zone_ops, - .default_attrs = ttm_mem_zone_attrs, -}; -static struct kobj_type ttm_mem_glob_kobj_type = {0}; - -static bool ttm_zones_above_swap_target(struct 
ttm_mem_global *glob, - bool from_wq, uint64_t extra) -{ - unsigned int i; - struct ttm_mem_zone *zone; - uint64_t target; - - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - - if (from_wq) - target = zone->swap_limit; - else if (capable(CAP_SYS_ADMIN)) - target = zone->emer_mem; - else - target = zone->max_mem; - - target = (extra > target) ? 0ULL : target; - - if (zone->used_mem > target) - return true; - } - return false; -} - -/* - * At this point we only support a single shrink callback. - * Extend this if needed, perhaps using a linked list of callbacks. - * Note that this function is reentrant: - * many threads may try to swap out at any given time. - */ - -static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, - uint64_t extra, struct ttm_operation_ctx *ctx) -{ - int ret; - - spin_lock(&glob->lock); - - while (ttm_zones_above_swap_target(glob, from_wq, extra)) { - spin_unlock(&glob->lock); - ret = ttm_global_swapout(ctx, GFP_KERNEL); - spin_lock(&glob->lock); - if (unlikely(ret <= 0)) - break; - } - - spin_unlock(&glob->lock); -} - -static void ttm_shrink_work(struct work_struct *work) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - struct ttm_mem_global *glob = - container_of(work, struct ttm_mem_global, work); - - ttm_shrink(glob, true, 0ULL, &ctx); -} - -static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram - si->totalhigh; - mem *= si->mem_unit; - - zone->name = "kernel"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_kernel = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} - -#ifdef CONFIG_HIGHMEM -static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone; - uint64_t mem; - int ret; - - if (si->totalhigh == 0) - return 0; - - zone = kzalloc(sizeof(*zone), GFP_KERNEL); - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - zone->name = "highmem"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_highmem = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", - zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#else -static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - /** - * No special dma32 zone needed. - */ - - if (mem <= ((uint64_t) 1ULL << 32)) { - kfree(zone); - return 0; - } - - /* - * Limit max dma32 memory to 4GB for now - * until we can figure out how big this - * zone really is. 
- */ - - mem = ((uint64_t) 1ULL << 32); - zone->name = "dma32"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_dma32 = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#endif - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) -{ - struct sysinfo si; - int ret; - int i; - struct ttm_mem_zone *zone; - - spin_lock_init(&glob->lock); - glob->swap_queue = create_singlethread_workqueue("ttm_swap"); - INIT_WORK(&glob->work, ttm_shrink_work); - - ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type, - &dev->kobj, "memory_accounting"); - if (unlikely(ret != 0)) { - kobject_put(&glob->kobj); - return ret; - } - - si_meminfo(&si); - - ret = ttm_mem_init_kernel_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#ifdef CONFIG_HIGHMEM - ret = ttm_mem_init_highmem_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#else - ret = ttm_mem_init_dma32_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#endif - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - pr_info("Zone %7s: Available graphics memory: %llu KiB\n", - zone->name, (unsigned long long)zone->max_mem >> 10); - } - return 0; -out_no_zone: - ttm_mem_global_release(glob); - return ret; -} - -void ttm_mem_global_release(struct ttm_mem_global *glob) -{ - struct ttm_mem_zone *zone; - unsigned int i; - - destroy_workqueue(glob->swap_queue); - glob->swap_queue = NULL; - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - kobject_del(&zone->kobj); - kobject_put(&zone->kobj); - } - kobject_del(&glob->kobj); - kobject_put(&glob->kobj); - memset(glob, 0, sizeof(*glob)); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob) -{ - bool needs_swapping = false; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (zone->used_mem > zone->swap_limit) { - needs_swapping = true; - break; - } - } - - spin_unlock(&glob->lock); - - if (unlikely(needs_swapping)) - (void)queue_work(glob->swap_queue, &glob->work); - -} - -static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount) -{ - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem -= amount; - } - spin_unlock(&glob->lock); -} - -void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount) -{ - return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount); -} -EXPORT_SYMBOL(ttm_mem_global_free); - -static int ttm_mem_global_reserve(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount, bool reserve) -{ - uint64_t limit; - int ret = -ENOMEM; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - - limit = (capable(CAP_SYS_ADMIN)) ? 
- zone->emer_mem : zone->max_mem; - - if (zone->used_mem > limit) - goto out_unlock; - } - - if (reserve) { - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem += amount; - } - } - - ret = 0; -out_unlock: - spin_unlock(&glob->lock); - ttm_check_swapping(glob); - - return ret; -} - - -static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - int count = TTM_MEMORY_ALLOC_RETRIES; - - while (unlikely(ttm_mem_global_reserve(glob, - single_zone, - memory, true) - != 0)) { - if (ctx->no_wait_gpu) - return -ENOMEM; - if (unlikely(count-- == 0)) - return -ENOMEM; - ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx); - } - - return 0; -} - -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - /** - * Normal allocations of kernel memory are registered in - * the kernel zone. - */ - - return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx); -} -EXPORT_SYMBOL(ttm_mem_global_alloc); - -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_zone *zone = NULL; - - /** - * Page allocations may be registed in a single zone - * only if highmem or !dma32. - */ - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - return ttm_mem_global_alloc_zone(glob, zone, size, ctx); -} - -void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, - uint64_t size) -{ - struct ttm_mem_zone *zone = NULL; - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - ttm_mem_global_free_zone(glob, zone, size); -} - -size_t ttm_round_pot(size_t size) -{ - if ((size & (size - 1)) == 0) - return size; - else if (size > PAGE_SIZE) - return PAGE_ALIGN(size); - else { - size_t tmp_size = 4; - - while (tmp_size < size) - tmp_size <<= 1; - - return tmp_size; - } - return 0; -} -EXPORT_SYMBOL(ttm_round_pot); diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h deleted file mode 100644 index 7b0d617ebcb1..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.h +++ /dev/null @@ -1,92 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifndef TTM_MEMORY_H -#define TTM_MEMORY_H - -#include <linux/workqueue.h> -#include <linux/spinlock.h> -#include <linux/bug.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kobject.h> -#include <linux/mm.h> - -#include <drm/ttm/ttm_bo_api.h> - -/** - * struct ttm_mem_global - Global memory accounting structure. - * - * @shrink: A single callback to shrink TTM memory usage. Extend this - * to a linked list to be able to handle multiple callbacks when needed. - * @swap_queue: A workqueue to handle shrinking in low memory situations. We - * need a separate workqueue since it will spend a lot of time waiting - * for the GPU, and this will otherwise block other workqueue tasks(?) - * At this point we use only a single-threaded workqueue. - * @work: The workqueue callback for the shrink queue. - * @lock: Lock to protect the @shrink - and the memory accounting members, - * that is, essentially the whole structure with some exceptions. - * @zones: Array of pointers to accounting zones. - * @num_zones: Number of populated entries in the @zones array. - * @zone_kernel: Pointer to the kernel zone. - * @zone_highmem: Pointer to the highmem zone if there is one. - * @zone_dma32: Pointer to the dma32 zone if there is one. - * - * Note that this structure is not per device. It should be global for all - * graphics devices. 
- */ - -#define TTM_MEM_MAX_ZONES 2 -struct ttm_mem_zone; -extern struct ttm_mem_global { - struct kobject kobj; - struct workqueue_struct *swap_queue; - struct work_struct work; - spinlock_t lock; - struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; - unsigned int num_zones; - struct ttm_mem_zone *zone_kernel; -#ifdef CONFIG_HIGHMEM - struct ttm_mem_zone *zone_highmem; -#else - struct ttm_mem_zone *zone_dma32; -#endif -} ttm_mem_glob; - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); - -#endif diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 22b3385d0c37..53de59ce30ca 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -93,10 +93,8 @@ struct ttm_object_device { spinlock_t object_lock; struct vmwgfx_open_hash object_hash; atomic_t object_count; - struct ttm_mem_global *mem_glob; struct dma_buf_ops ops; void (*dmabuf_release)(struct dma_buf *dma_buf); - size_t dma_buf_size; struct idr idr; }; @@ -352,11 +350,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; struct ttm_ref_object *ref; struct vmwgfx_hash_item *hash; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; int ret = -EINVAL; if (base->tfile != tfile && !base->shareable) @@ -381,13 +374,8 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, if (require_existed) return -EPERM; - ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), - &ctx); - if (unlikely(ret != 0)) - return ret; ref = kmalloc(sizeof(*ref), GFP_KERNEL); if (unlikely(ref == NULL)) { - ttm_mem_global_free(mem_glob, sizeof(*ref)); return -ENOMEM; } @@ -412,7 +400,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, spin_unlock(&tfile->lock); BUG_ON(ret != -EINVAL); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree(ref); } @@ -427,7 +414,6 @@ ttm_ref_object_release(struct kref *kref) struct ttm_base_object *base = ref->obj; struct ttm_object_file *tfile = ref->tfile; struct vmwgfx_open_hash *ht; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; ht = &tfile->ref_hash[ref->ref_type]; (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); @@ -438,7 +424,6 @@ ttm_ref_object_release(struct kref *kref) base->ref_obj_release(base, ref->ref_type); ttm_base_object_unref(&ref->obj); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree_rcu(ref, rcu_head); spin_lock(&tfile->lock); } @@ -526,8 +511,7 @@ out_err: } struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); @@ -536,7 +520,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, if (unlikely(tdev == NULL)) return NULL; - tdev->mem_glob = mem_glob; spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); ret = 
vmwgfx_ht_create(&tdev->object_hash, hash_order); @@ -547,8 +530,6 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, tdev->ops = *ops; tdev->dmabuf_release = tdev->ops.release; tdev->ops.release = ttm_prime_dmabuf_release; - tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + - ttm_round_pot(sizeof(struct file)); return tdev; out_no_object_hash: @@ -633,7 +614,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) if (prime->dma_buf == dma_buf) prime->dma_buf = NULL; mutex_unlock(&prime->mutex); - ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); ttm_base_object_unref(&base); } @@ -715,30 +695,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, dma_buf = prime->dma_buf; if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; exp_info.ops = &tdev->ops; exp_info.size = prime->size; exp_info.flags = flags; exp_info.priv = prime; /* - * Need to create a new dma_buf, with memory accounting. + * Need to create a new dma_buf */ - ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, - &ctx); - if (unlikely(ret != 0)) { - mutex_unlock(&prime->mutex); - goto out_unref; - } dma_buf = dma_buf_export(&exp_info); if (IS_ERR(dma_buf)) { ret = PTR_ERR(dma_buf); - ttm_mem_global_free(tdev->mem_glob, - tdev->dma_buf_size); mutex_unlock(&prime->mutex); goto out_unref; } diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index 6885ccbeec7a..a0f2da1012ba 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -42,7 +42,6 @@ #include <linux/list.h> #include <linux/rcupdate.h> -#include "ttm_memory.h" #include "vmwgfx_hashtab.h" /** @@ -296,7 +295,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); /** * ttm_object device init - initialize a struct ttm_object_device * - * @mem_glob: struct ttm_mem_global for memory accounting. * @hash_order: Order of hash table used to hash the base objects. * @ops: DMA buf ops for prime objects of this device. * @@ -305,8 +303,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); */ extern struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops); /** @@ -352,13 +349,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, #define ttm_prime_object_kfree(__obj, __prime) \ kfree_rcu(__obj, __prime.base.rhead) -/* - * Extra memory required by the base object's idr storage, which is allocated - * separately from the base object itself. We estimate an on-average 128 bytes - * per idr. - */ -#define TTM_OBJ_EXTRA_SIZE 128 - struct ttm_base_object * ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 6f27d69bad0e..fc8cc222ec51 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -1327,8 +1327,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) } /** - * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with - * memory accounting. + * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state. * * @dev_priv: Pointer to a device private structure. 
* @@ -1338,20 +1337,9 @@ struct vmw_ctx_binding_state * vmw_binding_state_alloc(struct vmw_private *dev_priv) { struct vmw_ctx_binding_state *cbs; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - int ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), - &ctx); - if (ret) - return ERR_PTR(ret); cbs = vzalloc(sizeof(*cbs)); if (!cbs) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); return ERR_PTR(-ENOMEM); } @@ -1362,17 +1350,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv) } /** - * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its - * memory accounting info. + * vmw_binding_state_free - Free a struct vmw_ctx_binding_state. * * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. */ void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) { - struct vmw_private *dev_priv = cbs->dev_priv; - vfree(cbs); - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index c97a3d5e90ce..bf9e9ff3be73 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -392,39 +392,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo) /** - * vmw_bo_acc_size - Calculate the pinned memory usage of buffers - * - * @dev_priv: Pointer to a struct vmw_private identifying the device. - * @size: The requested buffer size. - * @user: Whether this is an ordinary dma buffer or a user dma buffer. - */ -static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size, - bool user) -{ - static size_t struct_size, user_struct_size; - size_t num_pages = PFN_UP(size); - size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); - - if (unlikely(struct_size == 0)) { - size_t backend_size = ttm_round_pot(vmw_tt_size); - - struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_buffer_object)); - user_struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_user_buffer_object)) + - TTM_OBJ_EXTRA_SIZE; - } - - if (dev_priv->map_mode == vmw_dma_alloc_coherent) - page_array_size += - ttm_round_pot(num_pages * sizeof(dma_addr_t)); - - return ((user) ? 
user_struct_size : struct_size) + - page_array_size; -} - - -/** * vmw_bo_bo_free - vmw buffer object destructor * * @bo: Pointer to the embedded struct ttm_buffer_object @@ -471,24 +438,17 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, struct ttm_placement *placement, struct ttm_buffer_object **p_bo) { - struct ttm_operation_ctx ctx = { false, false }; + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; struct ttm_buffer_object *bo; - size_t acc_size; int ret; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (unlikely(!bo)) return -ENOMEM; - acc_size = ttm_round_pot(sizeof(*bo)); - acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *)); - acc_size += ttm_round_pot(sizeof(struct ttm_tt)); - - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - goto error_free; - - bo->base.size = size; dma_resv_init(&bo->base._resv); drm_vma_node_reset(&bo->base.vma_node); @@ -497,7 +457,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, ttm_bo_type_kernel, placement, 0, &ctx, NULL, NULL, NULL); if (unlikely(ret)) - goto error_account; + goto error_free; ttm_bo_pin(bo); ttm_bo_unreserve(bo); @@ -505,9 +465,6 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, return 0; -error_account: - ttm_mem_global_free(&ttm_mem_glob, acc_size); - error_free: kfree(bo); return ret; @@ -533,23 +490,20 @@ int vmw_bo_init(struct vmw_private *dev_priv, bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)) { - struct ttm_operation_ctx ctx = { interruptible, false }; + struct ttm_operation_ctx ctx = { + .interruptible = interruptible, + .no_wait_gpu = false + }; struct ttm_device *bdev = &dev_priv->bdev; - size_t acc_size; int ret; bool user = (bo_free == &vmw_user_bo_destroy); WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free))); - - acc_size = vmw_bo_acc_size(dev_priv, size, user); memset(vmw_bo, 0, sizeof(*vmw_bo)); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - return ret; vmw_bo->base.base.size = size; dma_resv_init(&vmw_bo->base.base._resv); @@ -559,7 +513,6 @@ int vmw_bo_init(struct vmw_private *dev_priv, ttm_bo_type_device, placement, 0, &ctx, NULL, NULL, bo_free); if (unlikely(ret)) { - ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 494cb98061f2..415774fde796 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -324,22 +324,3 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) kfree(man); } -/** - * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed - * resource manager - * - * Returns the approximate allocation size of a command buffer managed - * resource manager. 
- */ -size_t vmw_cmdbuf_res_man_size(void) -{ - static size_t res_man_size; - - if (unlikely(res_man_size == 0)) - res_man_size = - ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) + - ttm_round_pot(sizeof(struct hlist_head) << - VMW_CMDBUF_RES_MAN_HT_ORDER); - - return res_man_size; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 4446758b6880..713321d0c2c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_dx_context_destroy(struct vmw_resource *res); -static uint64_t vmw_user_context_size; - static const struct vmw_user_resource_conv user_context_conv = { .object_type = VMW_RES_CONTEXT, .base_obj_to_res = vmw_user_context_base_to_res, @@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res) { struct vmw_user_context *ctx = container_of(res, struct vmw_user_context, res); - struct vmw_private *dev_priv = res->dev_priv; if (ctx->cbs) vmw_binding_state_free(ctx->cbs); @@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res) (void) vmw_context_bind_dx_query(res, NULL); ttm_base_object_kfree(ctx, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); } /* @@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data, struct vmw_resource *tmp; struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; if (!has_sm4_context(dev_priv) && dx) { @@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data, return -EINVAL; } - if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + - ((dev_priv->has_mob) ? 
vmw_cmdbuf_res_man_size() : 0) + - + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_context_size, - &ttm_opt_ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for context" - " creation.\n"); - goto out_ret; - } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (unlikely(!ctx)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); ret = -ENOMEM; goto out_ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 17a98db00017..7bdd52678362 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -546,8 +546,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res) (void) vmw_cotable_destroy(res); } -static size_t cotable_acc_size; - /** * vmw_cotable_free - Cotable resource destructor * @@ -555,10 +553,7 @@ static size_t cotable_acc_size; */ static void vmw_cotable_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; - kfree(res); - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); } /** @@ -574,21 +569,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, u32 type) { struct vmw_cotable *vcotbl; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; u32 num_entries; - if (unlikely(cotable_acc_size == 0)) - cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - cotable_acc_size, &ttm_opt_ctx); - if (unlikely(ret)) - return ERR_PTR(ret); - vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); if (unlikely(!vcotbl)) { ret = -ENOMEM; @@ -622,7 +605,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, out_no_init: kfree(vcotbl); out_no_alloc: - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 2846a0009633..9680ab183941 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -50,9 +50,6 @@ #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 -#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) - - /* * Fully encoded drm commands. 
Might move to vmw_drm.h */ @@ -986,8 +983,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) goto out_err0; } - dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, - &vmw_prime_dmabuf_ops); + dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { drm_err(&dev_priv->drm, @@ -1083,8 +1079,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->sm_type = VMW_SM_4; } - vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); - /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ if (has_sm4_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { @@ -1397,7 +1391,6 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - ttm_mem_global_release(&ttm_mem_glob); drm_dev_unregister(dev); vmw_driver_unload(dev); } @@ -1641,13 +1634,9 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, &vmw->drm); - ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); - if (ret) - goto out_error; - ret = vmw_driver_load(vmw, ent->device); if (ret) - goto out_release; + goto out_error; ret = drm_dev_register(&vmw->drm, 0); if (ret) @@ -1656,8 +1645,6 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_unload: vmw_driver_unload(&vmw->drm); -out_release: - ttm_mem_global_release(&ttm_mem_glob); out_error: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index fbbbcdbe41e3..5599894534cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -628,9 +628,6 @@ struct vmw_private { struct vmw_cmdbuf_man *cman; DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); - /* Validation memory reservation */ - struct vmw_validation_mem vvm; - uint32 *devcaps; /* @@ -1028,9 +1025,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); -extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, - size_t gran); - /** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ @@ -1328,18 +1322,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int vmw_surface_gb_priv_define(struct drm_device *dev, - uint32_t user_accounting_size, - SVGA3dSurfaceAllFlags svga3d_flags, - SVGA3dSurfaceFormat format, - bool for_scanout, - uint32_t num_mip_levels, - uint32_t multisample_count, - uint32_t array_size, - struct drm_vmw_size size, - SVGA3dMSPattern multisample_pattern, - SVGA3dMSQualityLevel quality_level, - struct vmw_surface **srf_out); extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1348,7 +1330,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, struct drm_file *file_priv); int vmw_gb_surface_define(struct vmw_private *dev_priv, - uint32_t user_accounting_size, const struct vmw_surface_metadata *req, struct vmw_surface **srf_out); @@ -1409,7 +1390,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv, extern struct vmw_cmdbuf_res_manager * vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); -extern size_t vmw_cmdbuf_res_man_size(void); 
extern struct vmw_resource * vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, @@ -1606,11 +1586,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf) return buf; } -static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) -{ - return &ttm_mem_glob; -} - static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) { atomic_inc(&dev_priv->num_fifo_resources); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 986e85b7e616..cfe3edcf2cf2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4054,8 +4054,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, struct sync_file *sync_file = NULL; DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); - vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm); - if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 9fe12329a4d5..c9afd070c11b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -37,9 +37,6 @@ struct vmw_fence_manager { spinlock_t lock; struct list_head fence_list; struct work_struct work; - u32 user_fence_size; - u32 fence_size; - u32 event_fence_action_size; bool fifo_down; struct list_head cleanup_list; uint32_t pending_actions[VMW_ACTION_MAX]; @@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); fman->fifo_down = true; - fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) + - TTM_OBJ_EXTRA_SIZE; - fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); - fman->event_fence_action_size = - ttm_round_pot(sizeof(struct vmw_event_fence_action)); mutex_init(&fman->goal_irq_mutex); fman->ctx = dma_fence_context_alloc(1); @@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) { struct vmw_user_fence *ufence = container_of(fence, struct vmw_user_fence, fence); - struct vmw_fence_manager *fman = fman_from_fence(fence); ttm_base_object_kfree(ufence, base); - /* - * Free kernel space accounting. - */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->user_fence_size); } static void vmw_user_fence_base_release(struct ttm_base_object **p_base) @@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_user_fence *ufence; struct vmw_fence_obj *tmp; - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; int ret; - /* - * Kernel memory space accounting, since this object may - * be created by a user-space request. 
- */ - - ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, - &ctx); - if (unlikely(ret != 0)) - return ret; - ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (unlikely(!ufence)) { ret = -ENOMEM; @@ -646,7 +617,6 @@ out_err: tmp = &ufence->fence; vmw_fence_obj_unreference(&tmp); out_no_object: - ttm_mem_global_free(mem_glob, fman->user_fence_size); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 74fa41909213..40dc99337d5b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1188,7 +1188,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev, metadata.base_size.depth = 1; metadata.scanout = true; - ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); + ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out); if (ret) { DRM_ERROR("Failed to allocate proxy content buffer\n"); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index f9394207dd3c..65f7c2bdc322 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -413,10 +413,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) * @mob: Pointer to the mob the pagetable of which we want to * populate. * - * This function allocates memory to be used for the pagetable, and - * adjusts TTM memory accounting accordingly. Returns ENOMEM if - * memory resources aren't sufficient and may cause TTM buffer objects - * to be swapped out by using the TTM memory accounting function. + * This function allocates memory to be used for the pagetable. + * Returns ENOMEM if memory resources aren't sufficient and may + * cause TTM buffer objects to be swapped out. */ static int vmw_mob_pt_populate(struct vmw_private *dev_priv, struct vmw_mob *mob) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 922317d1acc8..7bc99b1279f7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -57,7 +57,6 @@ enum vmw_bo_dirty_method { * @ref_count: Reference count for this structure * @bitmap_size: The size of the bitmap in bits. Typically equal to the * nuber of pages in the bo. - * @size: The accounting size for this struct. * @bitmap: A bitmap where each bit represents a page. A set bit means a * dirty page. 
*/ @@ -68,7 +67,6 @@ struct vmw_bo_dirty { unsigned int change_count; unsigned int ref_count; unsigned long bitmap_size; - size_t size; unsigned long bitmap[]; }; @@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; pgoff_t num_pages = vbo->base.resource->num_pages; - size_t size, acc_size; + size_t size; int ret; - static struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; if (dirty) { dirty->ref_count++; @@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) } size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); - acc_size = ttm_round_pot(size); - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (ret) { - VMW_DEBUG_USER("Out of graphics memory for buffer object " - "dirty tracker.\n"); - return ret; - } dirty = kvzalloc(size, GFP_KERNEL); if (!dirty) { ret = -ENOMEM; goto out_no_dirty; } - dirty->size = acc_size; dirty->bitmap_size = num_pages; dirty->start = dirty->bitmap_size; dirty->end = 0; @@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) return 0; out_no_dirty: - ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; } @@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) struct vmw_bo_dirty *dirty = vbo->dirty; if (dirty && --dirty->ref_count == 0) { - size_t acc_size = dirty->size; - kvfree(dirty); - ttm_mem_global_free(&ttm_mem_glob, acc_size); vbo->dirty = NULL; } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index b8dd62529104..a1e47f39a60b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -53,10 +53,6 @@ struct vmw_dx_shader { struct list_head cotable_head; }; -static uint64_t vmw_user_shader_size; -static uint64_t vmw_shader_size; -static size_t vmw_shader_dx_size; - static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_shader_base_to_res(struct ttm_base_object *base); @@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); -static uint64_t vmw_user_shader_size; static const struct vmw_user_resource_conv user_shader_conv = { .object_type = VMW_RES_SHADER, @@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, * * @res: The shader resource * - * Frees the DX shader resource and updates memory accounting. + * Frees the DX shader resource. 
*/ static void vmw_dx_shader_res_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); vmw_resource_unreference(&shader->cotable); kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); } /** @@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, struct vmw_dx_shader *shader; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (!vmw_shader_dx_size) - vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); - if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, - &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - return ret; - } - shader = kmalloc(sizeof(*shader), GFP_KERNEL); if (!shader) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); return -ENOMEM; } @@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res) { struct vmw_user_shader *ushader = container_of(res, struct vmw_user_shader, shader.res); - struct vmw_private *dev_priv = res->dev_priv; ttm_base_object_kfree(ushader, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); } static void vmw_shader_free(struct vmw_resource *res) { struct vmw_shader *shader = vmw_res_to_shader(res); - struct vmw_private *dev_priv = res->dev_priv; kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); } /* @@ -722,31 +692,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_user_shader_size == 0)) - vmw_user_shader_size = - ttm_round_pot(sizeof(struct vmw_user_shader)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out; - } - ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); if (unlikely(!ushader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); ret = -ENOMEM; goto out; } @@ -793,31 +742,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, { struct vmw_shader *shader; struct vmw_resource *res; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_shader_size == 0)) - vmw_shader_size = - ttm_round_pot(sizeof(struct vmw_shader)) + - VMW_IDA_ACC_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out_err; - } - shader = kzalloc(sizeof(*shader), GFP_KERNEL); if (unlikely(!shader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); ret = -ENOMEM; goto out_err; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c index 33b69a70cfe3..76473d434f52 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c @@ -32,12 +32,10 @@ * struct 
vmw_user_simple_resource - User-space simple resource struct * * @base: The TTM base object implementing user-space visibility. - * @account_size: How much memory was accounted for this object. * @simple: The embedded struct vmw_simple_resource. */ struct vmw_user_simple_resource { struct ttm_base_object base; - size_t account_size; struct vmw_simple_resource simple; /* * Nothing to be placed after @simple, since size of @simple is @@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv, * * @res: The struct vmw_resource member of the simple resource object. * - * Frees memory and memory accounting for the object. + * Frees memory for the object. */ static void vmw_simple_resource_free(struct vmw_resource *res) { struct vmw_user_simple_resource *usimple = container_of(res, struct vmw_user_simple_resource, simple.res); - struct vmw_private *dev_priv = res->dev_priv; - size_t size = usimple->account_size; ttm_base_object_kfree(usimple, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, struct vmw_resource *res; struct vmw_resource *tmp; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t alloc_size; - size_t account_size; int ret; alloc_size = offsetof(struct vmw_user_simple_resource, simple) + func->size; - account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE + - TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size, - &ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for %s" - " creation.\n", func->res_func.type_name); - - goto out_ret; - } usimple = kzalloc(alloc_size, GFP_KERNEL); if (!usimple) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - account_size); ret = -ENOMEM; goto out_ret; } usimple->simple.func = func; - usimple->account_size = account_size; res = &usimple->simple.res; usimple->base.shareable = false; usimple->base.tfile = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 9efb4463ce99..9739ffde007c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) * * @res: Pointer to a struct vmw_resource * - * Frees memory and memory accounting held by a struct vmw_view. + * Frees memory held by the struct vmw_view. 
*/ static void vmw_view_res_free(struct vmw_resource *res) { struct vmw_view *view = vmw_view(res); - size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; - struct vmw_private *dev_priv = res->dev_priv; vmw_resource_unreference(&view->cotable); vmw_resource_unreference(&view->srf); kfree_rcu(view, rcu); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_resource *res; struct vmw_view *view; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t size; int ret; @@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, size = offsetof(struct vmw_view, cmd) + cmd_size; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for view creation\n"); - return ret; - } - view = kmalloc(size, GFP_KERNEL); if (!view) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index d85310b2608d..8f0d651d0144 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, } if (!vps->surf) { - ret = vmw_gb_surface_define(dev_priv, 0, &metadata, + ret = vmw_gb_surface_define(dev_priv, &metadata, &vps->surf); if (ret != 0) { DRM_ERROR("Couldn't allocate STDU surface.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c index c8efa4a6c995..2de97419d5c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c @@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); -static size_t vmw_streamoutput_size; - static const struct vmw_res_func vmw_dx_streamoutput_func = { .res_type = vmw_res_streamoutput, .needs_backup = true, @@ -254,12 +252,10 @@ vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man, static void vmw_dx_streamoutput_res_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res); vmw_resource_unreference(&so->cotable); kfree(so); - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size); } static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res) @@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man, struct vmw_dx_streamoutput *so; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (!vmw_streamoutput_size) - vmw_streamoutput_size = ttm_round_pot(sizeof(*so)); - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_streamoutput_size, &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for streamout.\n"); - return ret; - } - so = kmalloc(sizeof(*so), GFP_KERNEL); if (!so) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_streamoutput_size); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5d53a5f9d123..ab207de64c31 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -45,14 +45,12 @@ * @prime: The TTM prime object. * @base: The TTM base object handling user-space visibility. * @srf: The surface metadata. - * @size: TTM accounting size for the surface. * @master: Master of the creating client. Used for security check. * @backup_base: The TTM base object of the backup buffer. */ struct vmw_user_surface { struct ttm_prime_object prime; struct vmw_surface srf; - uint32_t size; struct drm_master *master; struct ttm_base_object *backup_base; }; @@ -74,13 +72,11 @@ struct vmw_surface_offset { /** * struct vmw_surface_dirty - Surface dirty-tracker * @cache: Cached layout information of the surface. - * @size: Accounting size for the struct vmw_surface_dirty. * @num_subres: Number of subresources. * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. */ struct vmw_surface_dirty { struct vmw_surface_cache cache; - size_t size; u32 num_subres; SVGA3dBox boxes[]; }; @@ -129,9 +125,6 @@ static const struct vmw_user_resource_conv user_surface_conv = { const struct vmw_user_resource_conv *user_surface_converter = &user_surface_conv; - -static uint64_t vmw_user_surface_size; - static const struct vmw_res_func vmw_legacy_surface_func = { .res_type = vmw_res_surface, .needs_backup = false, @@ -359,7 +352,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf, * vmw_surface. * * Destroys a the device surface associated with a struct vmw_surface if - * any, and adjusts accounting and resource count accordingly. + * any, and adjusts resource count accordingly. */ static void vmw_hw_surface_destroy(struct vmw_resource *res) { @@ -666,8 +659,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_surface *srf = vmw_res_to_srf(res); struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - struct vmw_private *dev_priv = srf->res.dev_priv; - uint32_t size = user_srf->size; WARN_ON_ONCE(res->dirty); if (user_srf->master) @@ -676,7 +667,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) kfree(srf->metadata.sizes); kfree(srf->snooper.image); ttm_prime_object_kfree(user_srf, prime); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -740,23 +730,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_create_req *req = &arg->req; struct drm_vmw_surface_arg *rep = &arg->rep; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; int i, j; uint32_t cur_bo_offset; struct drm_vmw_size *cur_size; struct vmw_surface_offset *cur_offset; uint32_t num_sizes; - uint32_t size; const SVGA3dSurfaceDesc *desc; - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - num_sizes = 0; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) @@ -768,10 +749,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, num_sizes == 0) return -EINVAL; - size = vmw_user_surface_size + - ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + - ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - desc = vmw_surface_get_desc(req->format); if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { VMW_DEBUG_USER("Invalid format %d for surface creation.\n", @@ -779,18 +756,10 @@ int vmw_surface_define_ioctl(struct drm_device 
                 return -EINVAL;
         }

-        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                   size, &ctx);
-        if (unlikely(ret != 0)) {
-                if (ret != -ERESTARTSYS)
-                        DRM_ERROR("Out of graphics memory for surface.\n");
-                goto out_unlock;
-        }
-
         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
         if (unlikely(!user_srf)) {
                 ret = -ENOMEM;
-                goto out_no_user_srf;
+                goto out_unlock;
         }

         srf = &user_srf->srf;
@@ -805,7 +774,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         memcpy(metadata->mip_levels, req->mip_levels,
                sizeof(metadata->mip_levels));
         metadata->num_sizes = num_sizes;
-        user_srf->size = size;
         metadata->sizes =
                 memdup_user((struct drm_vmw_size __user *)(unsigned long)
                             req->size_addr,
@@ -916,8 +884,6 @@ out_no_offsets:
         kfree(metadata->sizes);
 out_no_sizes:
         ttm_prime_object_kfree(user_srf, prime);
-out_no_user_srf:
-        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
         return ret;
 }
@@ -1459,7 +1425,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
         struct vmw_resource *res;
         struct vmw_resource *tmp;
         int ret = 0;
-        uint32_t size;
         uint32_t backup_handle = 0;
         SVGA3dSurfaceAllFlags svga3d_flags_64 =
                 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
@@ -1506,12 +1471,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                 return -EINVAL;
         }

-        if (unlikely(vmw_user_surface_size == 0))
-                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                        VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
-        size = vmw_user_surface_size;
-
         metadata.flags = svga3d_flags_64;
         metadata.format = req->base.format;
         metadata.mip_levels[0] = req->base.mip_levels;
@@ -1526,7 +1485,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                         drm_vmw_surface_flag_scanout;

         /* Define a surface based on the parameters. */
-        ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+        ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
         if (ret != 0) {
                 VMW_DEBUG_USER("Failed to define surface.\n");
                 return ret;
@@ -1955,11 +1914,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
         u32 num_mip;
         u32 num_subres;
         u32 num_samples;
-        size_t dirty_size, acc_size;
-        static struct ttm_operation_ctx ctx = {
-                .interruptible = false,
-                .no_wait_gpu = false
-        };
+        size_t dirty_size;
         int ret;

         if (metadata->array_size)
@@ -1973,14 +1928,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
         num_subres = num_layers * num_mip;
         dirty_size = struct_size(dirty, boxes, num_subres);
-        acc_size = ttm_round_pot(dirty_size);
-        ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
-                                   acc_size, &ctx);
-        if (ret) {
-                VMW_DEBUG_USER("Out of graphics memory for surface "
-                               "dirty tracker.\n");
-                return ret;
-        }

         dirty = kvzalloc(dirty_size, GFP_KERNEL);
         if (!dirty) {
@@ -1990,13 +1937,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)

         num_samples = max_t(u32, 1, metadata->multisample_count);
         ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
-                                      num_mip, num_layers, num_samples,
-                                      &dirty->cache);
+                                        num_mip, num_layers, num_samples,
+                                        &dirty->cache);
         if (ret)
                 goto out_no_cache;

         dirty->num_subres = num_subres;
-        dirty->size = acc_size;
         res->dirty = (struct vmw_resource_dirty *) dirty;

         return 0;
@@ -2004,7 +1950,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res)
 out_no_cache:
         kvfree(dirty);
 out_no_dirty:
-        ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
         return ret;
 }
@@ -2015,10 +1960,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res)
 {
         struct vmw_surface_dirty *dirty =
                 (struct vmw_surface_dirty *) res->dirty;
-        size_t acc_size = dirty->size;

         kvfree(dirty);
-        ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
         res->dirty = NULL;
 }

@@ -2051,8 +1994,6 @@ static int vmw_surface_clean(struct vmw_resource *res)
  * vmw_gb_surface_define - Define a private GB surface
  *
  * @dev_priv: Pointer to a device private.
- * @user_accounting_size: Used to track user-space memory usage, set
- *                        to 0 for kernel mode only memory
  * @metadata: Metadata representing the surface to create.
  * @user_srf_out: allocated user_srf. Set to NULL on failure.
@@ -2062,17 +2003,12 @@ static int vmw_surface_clean(struct vmw_resource *res)
  * it available to user mode drivers.
  */
 int vmw_gb_surface_define(struct vmw_private *dev_priv,
-                          uint32_t user_accounting_size,
                           const struct vmw_surface_metadata *req,
                           struct vmw_surface **srf_out)
 {
         struct vmw_surface_metadata *metadata;
         struct vmw_user_surface *user_srf;
         struct vmw_surface *srf;
-        struct ttm_operation_ctx ctx = {
-                .interruptible = true,
-                .no_wait_gpu = false
-        };
         u32 sample_count = 1;
         u32 num_layers = 1;
         int ret;
@@ -2113,22 +2049,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
         if (req->sizes != NULL)
                 return -EINVAL;

-        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                   user_accounting_size, &ctx);
-        if (ret != 0) {
-                if (ret != -ERESTARTSYS)
-                        DRM_ERROR("Out of graphics memory for surface.\n");
-                goto out_unlock;
-        }
-
         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
         if (unlikely(!user_srf)) {
                 ret = -ENOMEM;
-                goto out_no_user_srf;
+                goto out_unlock;
         }

         *srf_out = &user_srf->srf;
-        user_srf->size = user_accounting_size;
         user_srf->prime.base.shareable = false;
         user_srf->prime.base.tfile = NULL;
@@ -2179,9 +2106,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,

         return ret;

-out_no_user_srf:
-        ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
 out_unlock:
         return ret;
 }
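With the accounting size gone from struct vmw_user_surface, vmw_gb_surface_define() above loses its user_accounting_size parameter. Both callers touched by this patch change the same way; the two call sites from the hunks, side by side:

        /* before */
        ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
        /* after */
        ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);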
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index b15228e7dbeb..a3d39e6b62b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -175,7 +175,6 @@ struct vmw_ttm_tt {
         int mem_type;
         struct sg_table sgt;
         struct vmw_sg_table vsgt;
-        uint64_t sg_alloc_size;
         bool mapped;
         bool bound;
 };
@@ -300,17 +299,10 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
         struct vmw_private *dev_priv = vmw_tt->dev_priv;
-        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
         struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-        struct ttm_operation_ctx ctx = {
-                .interruptible = true,
-                .no_wait_gpu = false
-        };
         struct vmw_piter iter;
         dma_addr_t old;
         int ret = 0;
-        static size_t sgl_size;
-        static size_t sgt_size;

         if (vmw_tt->mapped)
                 return 0;
@@ -324,15 +316,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
         switch (dev_priv->map_mode) {
         case vmw_dma_map_bind:
         case vmw_dma_map_populate:
-                if (unlikely(!sgl_size)) {
-                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
-                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
-                }
-                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
-                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
-                if (unlikely(ret != 0))
-                        return ret;
-
                 ret = sg_alloc_table_from_pages_segment(
                         &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
                         (unsigned long)vsgt->num_pages << PAGE_SHIFT,
@@ -340,15 +323,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
                 if (ret)
                         goto out_sg_alloc_fail;

-                if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
-                        uint64_t over_alloc =
-                                sgl_size * (vsgt->num_pages -
-                                            vmw_tt->sgt.orig_nents);
-
-                        ttm_mem_global_free(glob, over_alloc);
-                        vmw_tt->sg_alloc_size -= over_alloc;
-                }
-
                 ret = vmw_ttm_map_for_dma(vmw_tt);
                 if (unlikely(ret != 0))
                         goto out_map_fail;
@@ -375,7 +349,6 @@ out_map_fail:
         sg_free_table(vmw_tt->vsgt.sgt);
         vmw_tt->vsgt.sgt = NULL;
 out_sg_alloc_fail:
-        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
         return ret;
 }
@@ -401,8 +374,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
                 vmw_ttm_unmap_from_dma(vmw_tt);
                 sg_free_table(vmw_tt->vsgt.sgt);
                 vmw_tt->vsgt.sgt = NULL;
-                ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                    vmw_tt->sg_alloc_size);
                 break;
         default:
                 break;
@@ -522,7 +493,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
                             struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-        unsigned int i;
         int ret;

         /* TODO: maybe completely drop this ? */
@@ -530,22 +500,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
                 return 0;

         ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
-        if (ret)
-                return ret;
-
-        for (i = 0; i < ttm->num_pages; ++i) {
-                ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
-                                                PAGE_SIZE, ctx);
-                if (ret)
-                        goto error;
-        }

-        return 0;
-
-error:
-        while (i--)
-                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-                                         PAGE_SIZE);
-        ttm_pool_free(&bdev->pool, ttm);
         return ret;
 }
@@ -554,7 +509,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
         struct vmw_ttm_tt *vmw_tt =
                 container_of(ttm, struct vmw_ttm_tt, dma_ttm);
-        unsigned int i;

         vmw_ttm_unbind(bdev, ttm);

@@ -565,10 +519,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,

         vmw_ttm_unmap_dma(vmw_tt);

-        for (i = 0; i < ttm->num_pages; ++i)
-                ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-                                         PAGE_SIZE);
-
         ttm_pool_free(&bdev->pool, ttm);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 0a4c340252ec..98459db70962 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -99,38 +99,3 @@ out_unref:
         return ret;
 }
-
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
-        static struct ttm_operation_ctx ctx = {.interruptible = false,
-                                               .no_wait_gpu = false};
-        struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-        return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
-        struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-        return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
-        struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
-        vvm->reserve_mem = vmw_vmt_reserve;
-        vvm->unreserve_mem = vmw_vmt_unreserve;
-        vvm->gran = gran;
-}
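After the vmwgfx_ttm_buffer.c hunks above, vmw_ttm_populate() collapses to the TTM pool call, with no per-page ttm_mem_global accounting and no unwind loop. Reassembled from the context lines for readability; a sketch only, and the ttm_tt_is_populated() condition is my reading of the line elided between the TODO comment and "return 0;" in the hunk:

static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        /* TODO: maybe completely drop this ? */
        if (ttm_tt_is_populated(ttm))
                return 0;

        /* The pool allocation is now the whole populate step. */
        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);

        return ret;
}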
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 41b7417cb5d3..f46891012be3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -29,6 +29,9 @@
 #include "vmwgfx_validation.h"
 #include "vmwgfx_drv.h"

+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 /**
  * struct vmw_validation_bo_node - Buffer object validation metadata.
  * @base: Metadata used for TTM reservation- and validation.
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
         struct page *page;

         if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
-                int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
-                if (ret)
-                        return NULL;
-
-                ctx->vm_size_left += ctx->vm->gran;
-                ctx->total_mem += ctx->vm->gran;
+                ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+                ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
         }

         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
         ctx->mem_size_left = 0;

         if (ctx->vm && ctx->total_mem) {
-                ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
                 ctx->total_mem = 0;
                 ctx->vm_size_left = 0;
         }
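With the reserve_mem()/unreserve_mem() callbacks gone, vmw_validation_mem_alloc() above grows its reservation by the fixed VMWGFX_VALIDATION_MEM_GRAN (sixteen pages) instead of asking a struct vmw_validation_mem implementation, and the reserve step can no longer fail. The resulting logic, assembled from the hunk:

        if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
                /* Fixed granularity: no callback and no error path. */
                ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
                ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
        }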
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 495fd504b8c6..f21df053882b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -40,21 +40,6 @@
 #define VMW_RES_DIRTY_CLEAR BIT(1)

 /**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
-        int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
-        void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
-        size_t gran;
-};
-
-/**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
  * @resource_list: List head for resource validation metadata
@@ -130,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
 }

 /**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
-                           struct vmw_validation_mem *vm)
-{
-        ctx->vm = vm;
-}
-
-/**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context
  * @ht: Pointer to a hash table to use for duplicate finding
@@ -191,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
 }

 /**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
-        memset(ctx, 0, sizeof(*ctx));
-        INIT_LIST_HEAD(&ctx->resource_list);
-        INIT_LIST_HEAD(&ctx->resource_ctx_list);
-        INIT_LIST_HEAD(&ctx->bo_list);
-}
-
-/**
  * vmw_validation_align - Align a validation memory allocation
  * @val: The size to be aligned
  *