author    Li Yang <leoli@freescale.com>              2007-06-18 19:29:21 +0800
committer Kumar Gala <galak@kernel.crashing.org>     2007-06-19 22:35:53 -0500
commit    7c8545e98468c53809fc06788a3b9a34dff05240 (patch)
tree      11f7cff9f7f0f67b04db8234c41de5d6bc871b4c /arch
parent    7b7a57c77dccddd84b6aa02a38deee7ad97c977a (diff)
[POWERPC] rheap - eliminate internal fragmentation caused by alignment
The patch adds the fragments caused by rh_alloc_align() back to the free list, instead of allocating the whole chunk of memory. This greatly improves the utilization of memory managed by rheap, and it solves the problem of MURAM running out when 3 UCCs are enabled on the MPC8323.

Signed-off-by: Li Yang <leoli@freescale.com>
Acked-by: Joakim Tjernlund <joakim.tjernlund@transmode.se>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
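The core of the change is splitting a free block three ways: a leading pad up to the aligned start, the allocation itself, and any tail that remains. A minimal user-space sketch of that arithmetic follows; the names free_block and split_aligned are illustrative stand-ins, not the rheap API:

#include <stdio.h>

/* Hypothetical simplified stand-in for rheap's rh_block_t; the real
 * structure also carries list_head linkage and an owner string. */
struct free_block {
	unsigned long start;
	unsigned long size;
};

/* Carve an aligned allocation of 'size' bytes out of 'blk', which the
 * caller has already checked is big enough (as the patched search loop
 * does). The leading pad, if any, is reported through *pad so it can
 * rejoin the free list instead of being wasted inside the allocation. */
static unsigned long split_aligned(struct free_block *blk, unsigned long size,
				   unsigned long alignment, struct free_block *pad)
{
	unsigned long start = (blk->start + alignment - 1) & ~(alignment - 1);

	/* Leading fragment: from the block start up to the aligned start */
	pad->start = blk->start;
	pad->size = start - blk->start;

	/* Shrink the original block to the tail fragment (it may reach
	 * zero size, in which case the real code drops it entirely). */
	blk->start = start + size;
	blk->size -= pad->size + size;

	return start;
}

int main(void)
{
	struct free_block blk = { .start = 0x1004, .size = 0x100 };
	struct free_block pad;
	unsigned long at = split_aligned(&blk, 0x40, 0x20, &pad);

	printf("alloc at 0x%lx, pad %lu bytes, tail %lu bytes\n",
	       at, pad.size, blk.size);
	return 0;
}

Before the patch, rh_alloc_align() instead grew the request by alignment - 1 and handed the whole padded chunk to the caller, so the 28-byte pad in this example would have stayed unusable until the block was freed.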
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/lib/rheap.c | 48 +++++++++++++++++++++++++++++-------------------
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 180ee2933ab9..2f24ea0d723a 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -437,27 +437,26 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
 	struct list_head *l;
 	rh_block_t *blk;
 	rh_block_t *newblk;
-	unsigned long start;
+	unsigned long start, sp_size;
 
 	/* Validate size, and alignment must be power of two */
 	if (size <= 0 || (alignment & (alignment - 1)) != 0)
 		return (unsigned long) -EINVAL;
 
-	/* given alignment larger that default rheap alignment */
-	if (alignment > info->alignment)
-		size += alignment - 1;
-
 	/* Align to configured alignment */
 	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
-	if (assure_empty(info, 1) < 0)
+	if (assure_empty(info, 2) < 0)
 		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
-		if (size <= blk->size)
-			break;
+		if (size <= blk->size) {
+			start = (blk->start + alignment - 1) & ~(alignment - 1);
+			if (start + size <= blk->start + blk->size)
+				break;
+		}
 		blk = NULL;
 	}
 
@@ -470,25 +469,36 @@ unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const ch
 		list_del(&blk->list);
 		newblk = blk;
 	} else {
+		/* Fragment caused, split if needed */
+		/* Create block for fragment in the beginning */
+		sp_size = start - blk->start;
+		if (sp_size) {
+			rh_block_t *spblk;
+
+			spblk = get_slot(info);
+			spblk->start = blk->start;
+			spblk->size = sp_size;
+			/* add before the blk */
+			list_add(&spblk->list, blk->list.prev);
+		}
 		newblk = get_slot(info);
-		newblk->start = blk->start;
+		newblk->start = start;
 		newblk->size = size;
 
-		/* blk still in free list, with updated start, size */
-		blk->start += size;
-		blk->size -= size;
+		/* blk still in free list, with updated start and size
+		 * for fragment in the end */
+		blk->start = start + size;
+		blk->size -= sp_size + size;
+		/* No fragment in the end, remove blk */
+		if (blk->size == 0) {
+			list_del(&blk->list);
+			release_slot(info, blk);
+		}
 	}
 
 	newblk->owner = owner;
-	start = newblk->start;
 	attach_taken_block(info, newblk);
 
-	/* for larger alignment return fixed up pointer */
-	/* this is no problem with the deallocator since */
-	/* we scan for pointers that lie in the blocks */
-	if (alignment > info->alignment)
-		start = (start + alignment - 1) & ~(alignment - 1);
-
 	return start;
 }
 
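For context, a hedged sketch of how a MURAM-style caller would exercise this path. rh_create(), rh_attach_region(), rh_alloc_align(), rh_free() and rh_destroy() are the rheap entry points of this era, but the region offset, sizes and the 64-byte alignment below are made-up illustrative values:

#include <linux/err.h>
#include <asm/rheap.h>

static int muram_alloc_example(void)
{
	rh_info_t *info;
	unsigned long offset;

	/* Heap bookkeeping with a 4-byte default alignment */
	info = rh_create(4);
	if (IS_ERR(info))
		return PTR_ERR(info);

	/* Manage a (made-up) 8 KiB MURAM window starting at offset 0 */
	rh_attach_region(info, 0, 8192);

	/* 256 bytes at 64-byte alignment; with this patch the pad in
	 * front of the aligned start returns to the free list instead
	 * of being absorbed by the allocation. */
	offset = rh_alloc_align(info, 256, 64, "ucc-example");
	if (IS_ERR_VALUE(offset)) {
		rh_destroy(info);
		return (int)offset;
	}

	rh_free(info, offset);
	rh_destroy(info);
	return 0;
}

Per the commit message, reclaiming these alignment pads is what lets three UCCs, each carving aligned buffers out of the MPC8323's small MURAM, fit without hitting -ENOMEM.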