author    Pauli Nieminen <suokkos@gmail.com>    2010-04-01 12:45:03 +0000
committer Dave Airlie <airlied@redhat.com>      2010-04-06 11:36:18 +1000
commit    c96af79e3463d5d3f865625baa8bb8aa4c0944a0 (patch)
tree      d3ed3dd9a4a9e8beda500f51213bd0c7e5c28884
parent    975efdb1bf925ad48d4e3fe5339a85f12601e10d (diff)
drm/ttm: Add sysfs interface to control pool allocator.
The sysfs interface allows the user to configure the pool allocator and to change the limits on the pool size.

Signed-off-by: Pauli Nieminen <suokkos@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c      |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  | 113
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h      |   2
3 files changed, 114 insertions(+), 3 deletions(-)
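The three attributes this patch adds (pool_max_size, pool_small_allocation, pool_allocation_size) accept and report values in kibibytes; the store handler converts them to pages internally, and the S_IRUGO | S_IWUSR mode means anyone can read them but only root can write. Below is a minimal userspace sketch, not part of the patch, showing how one of the attributes might be exercised; the attribute path is taken from the command line because its exact sysfs location depends on where the TTM memory-accounting kobject is registered and is not assumed here.

/*
 * Userspace sketch (not part of the patch): read one of the new pool
 * attributes and then write a new limit.  Values are in kiB, matching
 * ttm_pool_show()/ttm_pool_store().
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned cur, new_kb = 4096;	/* example: set the limit to 4096 kiB */
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path to pool_max_size>\n", argv[0]);
		return 1;
	}

	f = fopen(argv[1], "r");
	if (!f || fscanf(f, "%u", &cur) != 1) {
		perror(argv[1]);
		return 1;
	}
	fclose(f);
	printf("current value: %u kiB\n", cur);

	f = fopen(argv[1], "w");	/* requires write permission (root) */
	if (!f || fprintf(f, "%u\n", new_kb) < 0) {
		perror(argv[1]);
		return 1;
	}
	fclose(f);
	return 0;
}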
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index daff8a87977e..5e3f177323cb 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -393,7 +393,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
- ttm_page_alloc_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
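The updated call above passes the memory-accounting global so the pool kobject can be parented to it, and keeps the existing sizing policy: the pool limit is half of the kernel zone, expressed in pages. A standalone sketch of that arithmetic, using a 2 GiB zone and 4 KiB pages purely as illustrative assumptions:

/* Sketch only: how max_pages relates to the kernel zone size.
 * The 2 GiB zone and the 4 KiB page size are illustrative assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned long long max_mem = 2ULL << 30;		/* kernel zone: 2 GiB */
	unsigned long page_size = 4096;				/* PAGE_SIZE */
	unsigned long max_pages = max_mem / (2 * page_size);	/* half the zone, in pages */

	printf("pool limit: %lu pages (%llu kiB)\n",
	       max_pages, (unsigned long long)max_pages * page_size >> 10);
	return 0;
}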
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 57799dba35e0..6ca9b27e33d5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -72,6 +72,12 @@ struct ttm_page_pool {
unsigned long nrefills;
};
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is in the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialization to access them is pointless.
+ */
+
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
@@ -94,6 +100,7 @@ struct ttm_pool_opts {
* @pools: All pool objects in use.
**/
struct ttm_pool_manager {
+ struct kobject kobj;
struct shrinker mm_shrink;
atomic_t page_alloc_inited;
struct ttm_pool_opts options;
@@ -109,6 +116,100 @@ struct ttm_pool_manager {
};
};
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ (void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+ "is not allowed. Recomended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING "[ttm] Setting allocation size to "
+ "larger than %lu is not recomended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
static struct ttm_pool_manager _manager = {
.page_alloc_inited = ATOMIC_INIT(0)
};
@@ -669,8 +770,9 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
pool->name = name;
}
-int ttm_page_alloc_init(unsigned max_pages)
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
+ int ret;
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;
@@ -690,6 +792,13 @@ int ttm_page_alloc_init(unsigned max_pages)
_manager.options.small = SMALL_ALLOCATION;
_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+ kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+ ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager.kobj);
+ return ret;
+ }
+
ttm_pool_mm_shrink_init(&_manager);
return 0;
@@ -707,6 +816,8 @@ void ttm_page_alloc_fini()
for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+ kobject_put(&_manager.kobj);
}
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
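One detail of the store/show handlers above worth keeping in mind: the user-visible unit is kiB while the options are stored in pages, so every value makes a round trip through PAGE_SIZE >> 10. A self-contained sketch of that conversion, assuming a 4 KiB page size for illustration:

/* Sketch of the kiB <-> pages round trip performed by ttm_pool_store()
 * and ttm_pool_show().  A 4 KiB PAGE_SIZE is assumed for illustration. */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned kb = 512;				/* value written via sysfs, in kiB */
	unsigned pages = kb / (PAGE_SIZE >> 10);	/* stored internally: 512 / 4 = 128 pages */
	unsigned back = pages * (PAGE_SIZE >> 10);	/* reported back as 512 kiB */

	printf("%u kiB -> %u pages -> %u kiB\n", kb, pages, back);
	return 0;
}

Values that are not a multiple of PAGE_SIZE >> 10 are truncated toward zero by the division, so the value read back can be slightly smaller than the value written.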
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 8b091c309df4..8bb4de567b2c 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -61,7 +61,7 @@ void ttm_put_pages(struct list_head *pages,
* multiple times but ttm_page_alloc_fini has to be called same number of
* times.
*/
-int ttm_page_alloc_init(unsigned max_pages);
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/