/*
 * Simple allocator for internal RAM in ETRAX FS
 *
 * Copyright (c) 2004 Axis Communications AB.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/io.h>
#include <memmap.h>

#define STATUS_FREE		0
#define STATUS_ALLOCATED	1

/* With the L2 cache enabled, the first 66 KiB of internal RAM are reserved. */
#ifdef CONFIG_ETRAX_L2CACHE
#define RESERVED_SIZE (66 * 1024)
#else
#define RESERVED_SIZE 0
#endif

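/*
 * Bookkeeping: internal RAM is described by an ordered list of
 * intmem_allocation nodes, each covering a contiguous byte range
 * (offset, size) that is either free or allocated.  Allocation is
 * first-fit; freeing coalesces a block with its free neighbours.
 */
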
struct intmem_allocation {
	struct list_head entry;
	unsigned int size;
	unsigned offset;	/* Byte offset from the start of the mapping. */
	char status;		/* STATUS_FREE or STATUS_ALLOCATED. */
};

static struct list_head intmem_allocations;
static void *intmem_virtual;

/*
 * Set up the free list and the uncached mapping.  Runs as an initcall;
 * the calls from alloc/free below only re-check the flag and are no-ops
 * once this has succeeded.
 */
static int crisv32_intmem_init(void)
{
	static int initiated;

	if (!initiated) {
		struct intmem_allocation *alloc;

		alloc = kmalloc(sizeof *alloc, GFP_KERNEL);
		if (!alloc)
			return -ENOMEM;

		intmem_virtual = ioremap(MEM_INTMEM_START + RESERVED_SIZE,
					 MEM_INTMEM_SIZE - RESERVED_SIZE);
		if (!intmem_virtual) {
			kfree(alloc);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&intmem_allocations);
		initiated = 1;

		/* One free block initially covers all of internal RAM. */
		alloc->size = MEM_INTMEM_SIZE - RESERVED_SIZE;
		alloc->offset = 0;
		alloc->status = STATUS_FREE;
		list_add_tail(&alloc->entry, &intmem_allocations);
	}
	return 0;
}

void *crisv32_intmem_alloc(unsigned size, unsigned align)
{
	struct intmem_allocation *allocation;
	struct intmem_allocation *tmp;
	void *ret = NULL;

	/* align == 0 would divide by zero below; treat it as 1. */
	if (align == 0)
		align = 1;

	preempt_disable();
	if (crisv32_intmem_init()) {
		preempt_enable();
		return NULL;
	}

	list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
		/* Padding needed to align the start of this block. */
		int alignment = allocation->offset % align;
		alignment = alignment ? align - alignment : alignment;

		if (allocation->status == STATUS_FREE &&
		    allocation->size >= size + alignment) {
			struct intmem_allocation *tail = NULL;
			struct intmem_allocation *pad = NULL;

			/*
			 * Get any new bookkeeping nodes up front, so a
			 * failed kmalloc leaves the list untouched.
			 */
			if (allocation->size > size + alignment) {
				tail = kmalloc(sizeof *tail, GFP_ATOMIC);
				if (!tail)
					break;
			}
			if (alignment) {
				pad = kmalloc(sizeof *pad, GFP_ATOMIC);
				if (!pad) {
					kfree(tail);
					break;
				}
			}

			if (tail) {
				/* Split off the unused tail as free space. */
				tail->status = STATUS_FREE;
				tail->size = allocation->size - size -
					alignment;
				tail->offset = allocation->offset + size +
					alignment;
				list_add(&tail->entry, &allocation->entry);
			}
			if (pad) {
				/* The padding becomes its own free block. */
				pad->status = STATUS_FREE;
				pad->size = alignment;
				pad->offset = allocation->offset;
				list_add_tail(&pad->entry,
					      &allocation->entry);
			}

			allocation->status = STATUS_ALLOCATED;
			allocation->offset += alignment;
			allocation->size = size;
			ret = (void *)((unsigned long)intmem_virtual +
				       allocation->offset);
			break;
		}
	}
	preempt_enable();
	return ret;
}

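/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * that wants a descriptor ring in internal RAM might do something like
 *
 *	void *ring = crisv32_intmem_alloc(1024, 32);
 *	if (ring) {
 *		unsigned long phys = crisv32_intmem_virt_to_phys(ring);
 *		// ...program the DMA controller with 'phys'...
 *		crisv32_intmem_free(ring);
 *	}
 *
 * The size and alignment figures are made up for the example.
 */
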
void crisv32_intmem_free(void *addr)
{
	struct intmem_allocation *allocation;
	struct intmem_allocation *tmp;

	if (addr == NULL)
		return;

	preempt_disable();
	if (crisv32_intmem_init()) {
		preempt_enable();
		return;
	}

	list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
		if (allocation->offset == (unsigned)(addr - intmem_virtual)) {
			struct intmem_allocation *prev =
				list_entry(allocation->entry.prev,
					   struct intmem_allocation, entry);
			struct intmem_allocation *next =
				list_entry(allocation->entry.next,
					   struct intmem_allocation, entry);

			allocation->status = STATUS_FREE;

			/*
			 * Join with prev and/or next if also free; compare
			 * list pointers so the list head itself is never
			 * treated as a block.
			 */
			if (&prev->entry != &intmem_allocations &&
			    prev->status == STATUS_FREE) {
				prev->size += allocation->size;
				list_del(&allocation->entry);
				kfree(allocation);
				allocation = prev;
			}
			if (&next->entry != &intmem_allocations &&
			    next->status == STATUS_FREE) {
				allocation->size += next->size;
				list_del(&next->entry);
				kfree(next);
			}
			preempt_enable();
			return;
		}
	}
	preempt_enable();
}

/* Translate a physical internal-RAM address to its mapped virtual address. */
void *crisv32_intmem_phys_to_virt(unsigned long addr)
{
	return (void *)(addr - (MEM_INTMEM_START + RESERVED_SIZE) +
			(unsigned long)intmem_virtual);
}

/* Translate a mapped virtual address back to its physical address. */
unsigned long crisv32_intmem_virt_to_phys(void *addr)
{
	return (unsigned long)addr - (unsigned long)intmem_virtual +
		MEM_INTMEM_START + RESERVED_SIZE;
}

module_init(crisv32_intmem_init);