author     Liam R. Howlett <Liam.Howlett@Oracle.com>    2022-09-06 19:48:40 +0000
committer  Andrew Morton <akpm@linux-foundation.org>    2022-09-26 19:46:14 -0700
commit     000a449345bbb4ffbd880f7143b5fb4acac34121 (patch)
tree       3d061299215f6463b2b43fa37041b627939fa900
parent     e73cb368be374165b531d5ab1e74596b98e766a8 (diff)
radix tree test suite: add allocation counts and size to kmem_cache
Add functions to get the number of outstanding allocations and the total
number of allocations from a kmem_cache.  Also add a function to get the
currently allocated size and a way to zero the total allocation count.

Link: https://lkml.kernel.org/r/20220906194824.2110408-5-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  tools/testing/radix-tree/linux.c | 28
1 file changed, 27 insertions(+), 1 deletion(-)
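For illustration only, a minimal usage sketch (not part of the patch) showing how a test might consult the new counters. It assumes the test suite's stub kmem_cache_create(name, size, align, flags, ctor) signature, its local slab.h wrapper header on the include path, and GFP_KERNEL from the same headers.

/* Hypothetical usage sketch, not part of this patch. */
#include <assert.h>
#include <linux/slab.h>		/* test-suite stub header (assumed include path) */

static void check_cache_counters(void)
{
	struct kmem_cache *cache;
	void *a, *b;

	/* 128-byte objects; align/flags/ctor left empty (assumed stub signature) */
	cache = kmem_cache_create("test", 128, 0, 0, NULL);

	a = kmem_cache_alloc_lru(cache, NULL, GFP_KERNEL);
	b = kmem_cache_alloc_lru(cache, NULL, GFP_KERNEL);

	assert(kmem_cache_nr_allocated(cache) == 2);	/* objects currently live */
	assert(kmem_cache_nr_tallocated(cache) == 2);	/* total allocations so far */
	assert(kmem_cache_get_alloc(cache) == 2 * 128);	/* bytes: size * nr_allocated */

	kmem_cache_free(cache, a);
	assert(kmem_cache_nr_allocated(cache) == 1);	/* free drops the live count */
	assert(kmem_cache_nr_tallocated(cache) == 2);	/* but not the running total */

	kmem_cache_zero_nr_tallocated(cache);		/* reset the running total */
	assert(kmem_cache_nr_tallocated(cache) == 0);

	kmem_cache_free(cache, b);
}

The distinction the sketch relies on is visible in the patch below: nr_allocated is decremented in kmem_cache_free() and so tracks live objects, while nr_tallocated only ever grows until kmem_cache_zero_nr_tallocated() resets it.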
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 277aa8b70abc..f20529ae4dbe 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -24,6 +24,8 @@ struct kmem_cache {
 	void *objs;
 	void (*ctor)(void *);
 	unsigned int non_kernel;
+	unsigned long nr_allocated;
+	unsigned long nr_tallocated;
 };
 
 void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
@@ -31,9 +33,28 @@ void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
 	cachep->non_kernel = val;
 }
 
+unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
+{
+	return cachep->size * cachep->nr_allocated;
+}
+
+unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
+{
+	return cachep->nr_allocated;
+}
+
+unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
+{
+	return cachep->nr_tallocated;
+}
+
+void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
+{
+	cachep->nr_tallocated = 0;
+}
+
 void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 		int gfp)
-
 {
 	void *p;
 
@@ -64,7 +85,9 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 			memset(p, 0, cachep->size);
 	}
 
+	uatomic_inc(&cachep->nr_allocated);
 	uatomic_inc(&nr_allocated);
+	uatomic_inc(&cachep->nr_tallocated);
 	if (kmalloc_verbose)
 		printf("Allocating %p from slab\n", p);
 	return p;
@@ -74,6 +97,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
+	uatomic_dec(&cachep->nr_allocated);
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
 	pthread_mutex_lock(&cachep->lock);
@@ -99,6 +123,8 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->size = size;
 	ret->align = align;
 	ret->nr_objs = 0;
+	ret->nr_allocated = 0;
+	ret->nr_tallocated = 0;
 	ret->objs = NULL;
 	ret->ctor = ctor;
 	ret->non_kernel = 0;