Xenomai API  2.6.5
heap.h
1 /*
2  * @note Copyright (C) 2001,2002,2003 Philippe Gerum <[email protected]>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  *
19  * \ingroup heap
20  */
21 
22 #ifndef _XENO_NUCLEUS_HEAP_H
23 #define _XENO_NUCLEUS_HEAP_H
24 
25 #include <nucleus/queue.h>
26 
27 /*
28  * CONSTRAINTS:
29  *
30  * Minimum page size is 2 ** XNHEAP_MINLOG2 (must be large enough to
31  * hold a pointer).
32  *
33  * Maximum page size is 2 ** XNHEAP_MAXLOG2.
34  *
35  * Minimum block size equals the minimum page size.
36  *
37  * Requested block size smaller than the minimum block size is
38  * rounded to the minimum block size.
39  *
40  * Requested block size larger than 2 times the page size is rounded
41  * to the next page boundary and obtained from the free page
42  * list. So we need a bucket for each power of two between
43  * XNHEAP_MINLOG2 and XNHEAP_MAXLOG2 inclusive, plus one to honor
44  * requests ranging from the maximum page size to twice this size.
45  */
46 
47 #if defined(__KERNEL__) || defined(__XENO_SIM__)
48 
#define XNHEAP_PAGE_SIZE	512 /* A reasonable value for the xnheap page size */
#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE-1))
#define XNHEAP_PAGE_ALIGN(addr)	(((addr)+XNHEAP_PAGE_SIZE-1)&XNHEAP_PAGE_MASK)

#define XNHEAP_MINLOG2    3
#define XNHEAP_MAXLOG2    22	/* Must hold pagemap::bcount objects */
#define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
#define XNHEAP_MINALIGNSZ (1 << 4) /* i.e. 16 bytes */
#define XNHEAP_NBUCKETS   (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
/*
 * Maximum size of a single extent (i.e. 2Gb). The constant must be
 * unsigned: (1 << 31) would shift a 1 into the sign bit of a 32-bit
 * int, which is undefined behavior in C (CERT INT34-C). (1U << 31)
 * yields the same 0x80000000 value well-defined.
 */
#define XNHEAP_MAXEXTSZ   (1U << 31) /* i.e. 2Gb */

/* Page states stored in xnpagemap::type (any other value is the log2
   block size the page was split into -- see struct xnpagemap). */
#define XNHEAP_PFREE   0
#define XNHEAP_PCONT   1
#define XNHEAP_PLIST   2

/* Extra allocation flag: request non-cached memory from the arch layer. */
#define XNHEAP_GFP_NONCACHED (1 << __GFP_BITS_SHIFT)
65 
/*
 * Per-page descriptor; one entry exists for every page of an extent
 * (see xnextent::pagemap). Packs into a single 32-bit word, so the
 * page map costs 4 bytes per page.
 */
struct xnpagemap {
	unsigned int type : 8;	  /* PFREE, PCONT, PLIST or log2 */
	unsigned int bcount : 24; /* Number of active blocks. */
};
70 
/*
 * A contiguous memory extent managed by a heap. A heap owns one or
 * more extents queued on xnheap::extents.
 */
typedef struct xnextent {

	xnholder_t link;	/* Link in the owning heap's extent queue. */

#define link2extent(ln) container_of(ln, xnextent_t, link)

	caddr_t membase,	/* Base address of the page array */
		memlim,		/* Memory limit of page array */
		freelist;	/* Head of the free page list */

	/*
	 * Page map: one xnpagemap entry per page of the extent. Declared
	 * with a single element (legacy pre-C99 flexible-array idiom) but
	 * actually spans all pages; the extent header size is computed
	 * from sizeof(xnextent_t) plus one entry per page (see
	 * xnheap_external_overhead()), so this member must remain last
	 * and must keep its [1] dimension.
	 */
	struct xnpagemap pagemap[1]; /* Beginning of page map */

} xnextent_t;
84 
/*
 * Core heap descriptor. Allocation state is a set of power-of-two
 * buckets backed by page-based extents.
 */
typedef struct xnheap {

	xnholder_t link;	/* Link in the global heap queue. */

#define link2heap(ln) container_of(ln, xnheap_t, link)

	u_long extentsize,	/* Size of each memory extent. */
		pagesize,	/* Page size; must be a power of 2. */
		pageshift,	/* Shift count matching pagesize (log2) -- set at init. */
		hdrsize,	/* Size of the extent header (incl. page map). */
		npages,		/* Number of pages per extent */
		ubytes,		/* Bytes currently allocated (see xnheap_used_mem()). */
		maxcont;	/* Largest allocatable contiguous size (see xnheap_max_contiguous()). */

	xnqueue_t extents;	/* Queue of xnextent_t attached to this heap. */

	DECLARE_XNLOCK(lock);	/* Serializes access to the heap state. */

	/* One free list per power-of-two size class, from XNHEAP_MINLOG2
	   up (see XNHEAP_NBUCKETS). */
	struct xnbucket {
		caddr_t freelist; /* Head of the free block list. */
		int fcount;	  /* Number of blocks on that list. */
	} buckets[XNHEAP_NBUCKETS];

	/* Per-CPU queues of blocks pending deferred release
	   (fed by xnheap_schedule_free(), drained by
	   xnheap_finalize_free()). */
	xnholder_t *idleq[XNARCH_NR_CPUS];

	xnarch_heapcb_t archdep; /* Architecture-specific control block. */

	XNARCH_DECL_DISPLAY_CONTEXT();

	xnholder_t stat_link;	/* Link in heapq */

	char label[XNOBJECT_NAME_LEN+16]; /* Heap name (see xnheap_set_label()). */

} xnheap_t;
119 
120 extern xnheap_t kheap;
121 
122 #if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
123 extern xnheap_t kstacks;
124 #endif
125 
/* Read-only accessors for heap geometry and usage statistics. */
#define xnheap_extentsize(heap)		((heap)->extentsize)
#define xnheap_page_size(heap)		((heap)->pagesize)
#define xnheap_page_count(heap)		((heap)->npages)
/* Usable memory = largest contiguous size times the number of extents. */
#define xnheap_usable_mem(heap)		((heap)->maxcont * countq(&(heap)->extents))
#define xnheap_used_mem(heap)		((heap)->ubytes)
#define xnheap_max_contiguous(heap)	((heap)->maxcont)
132 
/*
 * Round @size up to the next multiple of @al. The alignment value
 * must be a power of 2, so that (al - 1) forms a low-bit mask.
 */
static inline size_t xnheap_align(size_t size, size_t al)
{
	size_t mask = al - 1;

	return (size + mask) & ~mask;
}
138 
139 static inline size_t xnheap_external_overhead(size_t hsize, size_t psize)
140 {
141  size_t pages = (hsize + psize - 1) / psize;
142  return xnheap_align(sizeof(xnextent_t)
143  + pages * sizeof(struct xnpagemap), psize);
144 }
145 
146 static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
147 {
148  /* o = (h - o) * m / p + e
149  o * p = (h - o) * m + e * p
150  o * (p + m) = h * m + e * p
151  o = (h * m + e *p) / (p + m)
152  */
153  return xnheap_align((sizeof(xnextent_t) * psize
154  + sizeof(struct xnpagemap) * hsize)
155  / (psize + sizeof(struct xnpagemap)), psize);
156 }
157 
/* Convenience wrappers operating on the core kernel heap (kheap). */
#define xnmalloc(size)	xnheap_alloc(&kheap,size)
#define xnfree(ptr)	xnheap_free(&kheap,ptr)
#define xnfreesync()	xnheap_finalize_free(&kheap)
/*
 * Free @ptr from any context: when @thread is the current thread --
 * i.e. we would be releasing memory we may still be running on --
 * defer the release through xnheap_schedule_free() using @ln as the
 * queuing link; otherwise free immediately.
 */
#define xnfreesafe(thread, ptr, ln) \
	do { \
		if (xnpod_current_p(thread)) \
			xnheap_schedule_free(&kheap, ptr, ln); \
		else \
			xnheap_free(&kheap,ptr); \
	} while(0)
168 
static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
{
	/*
	 * Account for the minimum heap size (i.e. 2 * page size) plus
	 * overhead so that the actual heap space is large enough to
	 * match the requested size. Using a small page size for large
	 * single-block heaps might reserve a lot of useless page map
	 * memory, but this should never get pathological anyway,
	 * since we only consume 4 bytes per page.
	 */
	const size_t minsz = 2 * psize;

	if (hsize < minsz)
		hsize = minsz;

	return xnheap_align(hsize + xnheap_external_overhead(hsize, psize),
			    psize);
}
184 
185 #ifdef __cplusplus
186 extern "C" {
187 #endif
188 
189 /* Private interface. */
190 
191 #ifdef __KERNEL__
192 
193 int xnheap_mount(void);
194 
195 void xnheap_umount(void);
196 
197 void xnheap_init_proc(void);
198 
199 void xnheap_cleanup_proc(void);
200 
201 int xnheap_init_mapped(xnheap_t *heap,
202  u_long heapsize,
203  int memflags);
204 
205 void xnheap_destroy_mapped(xnheap_t *heap,
206  void (*release)(struct xnheap *heap),
207  void __user *mapaddr);
208 
/* Kernel base address of the heap storage (set up by the arch layer
   for heaps created with xnheap_init_mapped()). */
#define xnheap_base_memory(heap) \
	((unsigned long)((heap)->archdep.heapbase))

/* Byte offset of @ptr from the start of the heap storage -- suitable
   for relocating pointers across the user-space mapping. */
#define xnheap_mapped_offset(heap,ptr) \
	(((caddr_t)(ptr)) - (caddr_t)xnheap_base_memory(heap))

/* Inverse of xnheap_mapped_offset(): kernel address at offset @off. */
#define xnheap_mapped_address(heap,off) \
	((caddr_t)xnheap_base_memory(heap) + (off))

/* Non-zero when the heap has mappable backing storage. */
#define xnheap_mapped_p(heap) \
	(xnheap_base_memory(heap) != 0)
220 
221 #endif /* __KERNEL__ */
222 
223 /* Public interface. */
224 
225 int xnheap_init(xnheap_t *heap,
226  void *heapaddr,
227  u_long heapsize,
228  u_long pagesize);
229 
230 void xnheap_set_label(xnheap_t *heap, const char *name, ...);
231 
232 void xnheap_destroy(xnheap_t *heap,
233  void (*flushfn)(xnheap_t *heap,
234  void *extaddr,
235  u_long extsize,
236  void *cookie),
237  void *cookie);
238 
239 int xnheap_extend(xnheap_t *heap,
240  void *extaddr,
241  u_long extsize);
242 
243 void *xnheap_alloc(xnheap_t *heap,
244  u_long size);
245 
246 int xnheap_test_and_free(xnheap_t *heap,
247  void *block,
248  int (*ckfn)(void *block));
249 
250 int xnheap_free(xnheap_t *heap,
251  void *block);
252 
253 void xnheap_schedule_free(xnheap_t *heap,
254  void *block,
255  xnholder_t *link);
256 
257 void xnheap_finalize_free_inner(xnheap_t *heap,
258  int cpu);
259 
/*
 * Drain the current CPU's queue of blocks pending deferred release
 * (queued by xnheap_schedule_free()). Inline fast path: the heavier
 * inner routine is only entered when the per-CPU idle queue is
 * non-empty.
 */
static inline void xnheap_finalize_free(xnheap_t *heap)
{
	int cpu = xnarch_current_cpu();

	/* NOTE(review): spltest() != 0 presumably asserts that interrupts
	   are masked, i.e. we cannot migrate CPUs mid-drain -- confirm
	   against the nucleus locking rules. */
	XENO_ASSERT(NUCLEUS,
		    spltest() != 0,
		    xnpod_fatal("%s called in unsafe context", __FUNCTION__));

	if (heap->idleq[cpu])
		xnheap_finalize_free_inner(heap, cpu);
}
271 
272 int xnheap_check_block(xnheap_t *heap,
273  void *block);
274 
275 #ifdef __cplusplus
276 }
277 #endif
278 
279 #endif /* __KERNEL__ || __XENO_SIM__ */
280 
/* Character device through which user space reaches heap memory
   (misc device, fixed minor). */
#define XNHEAP_DEV_NAME  "/dev/rtheap"
#define XNHEAP_DEV_MINOR 254

/* Possible arguments to the sys_heap_info syscall */
#define XNHEAP_PROC_PRIVATE_HEAP 0
#define XNHEAP_PROC_SHARED_HEAP  1
#define XNHEAP_SYS_HEAP          2
#define XNHEAP_SYS_STACKPOOL     3

/* Heap information block returned by sys_heap_info. */
struct xnheap_desc {
	unsigned long handle;	/* Heap object handle. */
	unsigned int size;	/* Total heap size, in bytes. */
	unsigned long area;	/* Base address of the heap storage. */
	unsigned long used;	/* Bytes currently allocated -- presumably
				   mirrors xnheap::ubytes; verify in heap.c. */
};
296 
297 #endif /* !_XENO_NUCLEUS_HEAP_H */
int xnheap_extend(xnheap_t *heap, void *extaddr, u_long extsize)
Extend a memory heap.
Definition: heap.c:966
void * xnheap_alloc(xnheap_t *heap, u_long size)
Allocate a memory block from a memory heap.
Definition: heap.c:570
int xnheap_test_and_free(xnheap_t *heap, void *block, int(*ckfn)(void *block))
Test and release a memory block to a memory heap.
Definition: heap.c:711
void xnheap_schedule_free(xnheap_t *heap, void *block, xnholder_t *link)
Schedule a memory block for release.
Definition: heap.c:1016
int xnheap_free(xnheap_t *heap, void *block)
Release a memory block to a memory heap.
Definition: heap.c:931
void xnheap_set_label(xnheap_t *heap, const char *name,...)
Set the heap's label string.
Definition: heap.c:365
int xnheap_init(xnheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize)
Initialize a memory heap.
Definition: heap.c:256