00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022 #ifndef _XENO_NUCLEUS_HEAP_H
00023 #define _XENO_NUCLEUS_HEAP_H
00024
00025 #include <nucleus/queue.h>
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00048
/*
 * Allocation sizes are rounded to powers of two between 2^XNHEAP_MINLOG2
 * and 2^XNHEAP_MAXLOG2 bytes; one extra bucket collects page-multiple
 * requests, hence the "+ 2" in XNHEAP_NBUCKETS.
 */
#define XNHEAP_MINLOG2 3
#define XNHEAP_MAXLOG2 22
#define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
#define XNHEAP_MINALIGNSZ (1 << 4)	/* i.e. 16 bytes */
#define XNHEAP_NBUCKETS (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
/*
 * Maximum size of a single extent (2 GiB).  The shift must be done on
 * an unsigned operand: (1 << 31) shifts into the sign bit of int,
 * which is undefined behavior and sign-extends to a huge value when
 * compared against a 64-bit u_long.
 */
#define XNHEAP_MAXEXTSZ (1U << 31)

/* Page states recorded in the extent's page map (see struct xnpagemap). */
#define XNHEAP_PFREE 0
#define XNHEAP_PCONT 1
#define XNHEAP_PLIST 2

/* Flag passed in memflags requesting non-cached memory. */
#define XNHEAP_GFP_NONCACHED (1 << __GFP_BITS_SHIFT)
00061
/*
 * A heap is built from one or more memory extents.  Each extent
 * carries a per-page map describing the state of every page it spans.
 */
typedef struct xnextent {

	xnholder_t link;	/* Link into the heap's extent queue. */

#define link2extent(ln) container_of(ln, xnextent_t, link)

	caddr_t membase,	/* Base address of the page area. */
		memlim,		/* Limit of the page area (presumably first
				   byte past it -- confirm in heap.c). */
		freelist;	/* Head of the free page list. */

	struct xnpagemap {	/* Per-page descriptor. */
		unsigned int type : 8;	  /* XNHEAP_PFREE/PCONT/PLIST state;
					     presumably also encodes the block
					     size class for busy pages --
					     confirm against xnheap_alloc(). */
		unsigned int bcount : 24; /* NOTE(review): looks like a count
					     of live blocks in the page --
					     verify in heap.c. */
	} pagemap[1];		/* Variable-length in practice.  Deliberately
				   [1] rather than a C99 flexible member:
				   sizeof(xnextent_t) including one map entry
				   feeds the overhead computations below --
				   do not "modernize" without auditing them. */

} xnextent_t;
00078
/* Heap descriptor. */
typedef struct xnheap {

	xnholder_t link;	/* Link into a global heap queue. */

#define link2heap(ln) container_of(ln, xnheap_t, link)

	u_long extentsize,	/* Size of each memory extent, in bytes. */
		pagesize,	/* Allocation page size. */
		pageshift,	/* Presumably log2(pagesize) -- confirm in
				   xnheap_init(). */
		hdrsize,	/* Size of the extent header (incl. page map). */
		npages,		/* Pages per extent (see xnheap_page_count). */
		ubytes,		/* Bytes currently in use (xnheap_used_mem). */
		maxcont;	/* Largest contiguous allocatable chunk
				   (xnheap_max_contiguous). */

	xnqueue_t extents;	/* Queue of xnextent_t making up this heap. */

	DECLARE_XNLOCK(lock);	/* Serializes access to the heap state. */

	struct xnbucket {	/* One free list per size class
				   (XNHEAP_NBUCKETS of them). */
		caddr_t freelist;
		int fcount;
	} buckets[XNHEAP_NBUCKETS];

	xnholder_t *idleq;	/* Deferred-free queue, drained by
				   xnheap_finalize_free(). */

	xnarch_heapcb_t archdep;	/* Arch-dependent data; heapbase is
					   used by the mapped-heap macros. */

	XNARCH_DECL_DISPLAY_CONTEXT();

} xnheap_t;
00109
extern xnheap_t kheap;		/* The system heap backing xnmalloc/xnfree. */

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
extern xnheap_t kstacks;	/* Presumably a dedicated pool for thread
				   stacks -- confirm against users of
				   CONFIG_XENO_OPT_SYS_STACKPOOLSZ. */
#endif

/* Read-only accessors for the heap descriptor fields. */
#define xnheap_extentsize(heap) ((heap)->extentsize)
#define xnheap_page_size(heap) ((heap)->pagesize)
#define xnheap_page_count(heap) ((heap)->npages)
/* Total usable memory: per-extent capacity times number of extents. */
#define xnheap_usable_mem(heap) ((heap)->maxcont * countq(&(heap)->extents))
#define xnheap_used_mem(heap) ((heap)->ubytes)
#define xnheap_max_contiguous(heap) ((heap)->maxcont)
00122
/* Round size up to the next multiple of al (al must be a power of two). */
static inline size_t xnheap_align(size_t size, size_t al)
{
	size_t mask = al - 1;

	return (size + mask) & ~mask;
}
00128
00129 static inline size_t xnheap_external_overhead(size_t hsize, size_t psize)
00130 {
00131 size_t pages = (hsize + psize - 1) / psize;
00132 return xnheap_align(sizeof(xnextent_t)
00133 + pages * sizeof(struct xnpagemap), psize);
00134 }
00135
/*
 * Overhead of the extent descriptor and page map when they are carved
 * out of the extent memory itself.  With e = sizeof(xnextent_t) and
 * m = sizeof(struct xnpagemap), the overhead o must satisfy
 *
 *	o = e + m * (hsize - o) / psize
 *
 * (header plus one map entry per remaining usable page), which solves
 * to o = (e * psize + m * hsize) / (psize + m), rounded up here to a
 * whole number of pages.
 */
static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
{
	return xnheap_align((sizeof(xnextent_t) * psize
			     + sizeof(struct xnpagemap) * hsize)
			    / (psize + sizeof(struct xnpagemap)), psize);
}
00147
/* Convenience wrappers operating on the system heap (kheap). */
#define xnmalloc(size) xnheap_alloc(&kheap,size)
#define xnfree(ptr) xnheap_free(&kheap,ptr)
#define xnfreesync() xnheap_finalize_free(&kheap)
/*
 * Release ptr back to kheap.  When the caller runs on behalf of
 * `thread' itself, the release is deferred through the idle queue
 * using holder `ln' (presumably because the block still underlies the
 * current context -- confirm against xnheap_schedule_free() in
 * heap.c); otherwise it is freed immediately.
 */
#define xnfreesafe(thread,ptr,ln) \
	do { \
		if (xnpod_current_p(thread)) \
			xnheap_schedule_free(&kheap,ptr,ln); \
		else \
			xnheap_free(&kheap,ptr); \
	} while(0)
00158
/*
 * Round a requested heap size up to what must actually be reserved:
 * enforce the minimum of two pages, add the external overhead (extent
 * header + page map) so the caller still gets at least hsize usable
 * bytes, then align the total on the page size.  Note the overhead is
 * computed from the pre-overhead size, which slightly over-reserves --
 * that is the safe direction.
 */
static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
{
	if (hsize < 2 * psize)
		hsize = 2 * psize;
	hsize += xnheap_external_overhead(hsize, psize);
	return xnheap_align(hsize, psize);
}
00174
#ifdef __cplusplus
extern "C" {
#endif

#ifdef __KERNEL__

/* Device minor used to export mappable heaps to user-space --
   presumably via the XNHEAP_DEV_NAME misc device; confirm in heap.c. */
#define XNHEAP_DEV_MINOR 254

/* Register/unregister the heap support (device node, etc.) at
   nucleus mount/umount time. */
int xnheap_mount(void);

void xnheap_umount(void);

/* Create a heap whose memory can be mmap()ed to user-space. */
int xnheap_init_mapped(xnheap_t *heap,
		       u_long heapsize,
		       int memflags);

/* Destroy a mapped heap; `release' is presumably called once the last
   mapping is gone -- confirm in heap.c. */
int xnheap_destroy_mapped(xnheap_t *heap,
			  void (*release)(struct xnheap *heap),
			  void __user *mapaddr);

/* Translate between a block pointer and its offset inside a mapped
   heap, using the arch-dependent base address. */
#define xnheap_mapped_offset(heap,ptr) \
	(((caddr_t)(ptr)) - ((caddr_t)(heap)->archdep.heapbase))

#define xnheap_mapped_address(heap,off) \
	(((caddr_t)(heap)->archdep.heapbase) + (off))

/* Non-NULL heapbase marks a heap set up for user-space mapping. */
#define xnheap_mapped_p(heap) \
	((heap)->archdep.heapbase != NULL)

#endif
00207
00208
00209
/* Initialize a heap over heapsize bytes at heapaddr, managed in pages
   of pagesize bytes. */
int xnheap_init(xnheap_t *heap,
		void *heapaddr,
		u_long heapsize,
		u_long pagesize);

/* Destroy the heap; flushfn, when non-NULL, is presumably invoked for
   each extent so the caller can release the backing memory -- confirm
   in heap.c.  `cookie' is passed through verbatim. */
int xnheap_destroy(xnheap_t *heap,
		   void (*flushfn)(xnheap_t *heap,
				   void *extaddr,
				   u_long extsize,
				   void *cookie),
		   void *cookie);

/* Add a new extent of extsize bytes at extaddr to the heap. */
int xnheap_extend(xnheap_t *heap,
		  void *extaddr,
		  u_long extsize);

/* Allocate size bytes; returns NULL on failure. */
void *xnheap_alloc(xnheap_t *heap,
		   u_long size);

/* Free `block' only if ckfn (when non-NULL) approves it -- exact
   contract not visible here; confirm in heap.c. */
int xnheap_test_and_free(xnheap_t *heap,
			 void *block,
			 int (*ckfn)(void *block));

/* Return a block to the heap it was allocated from. */
int xnheap_free(xnheap_t *heap,
		void *block);

/* Queue a block on the heap's idle queue for deferred release, using
   holder `link' (see xnfreesafe). */
void xnheap_schedule_free(xnheap_t *heap,
			  void *block,
			  xnholder_t *link);

void xnheap_finalize_free_inner(xnheap_t *heap);

/* Drain the deferred-free queue, if any blocks are pending. */
static inline void xnheap_finalize_free(xnheap_t *heap)
{
	if (heap->idleq)
		xnheap_finalize_free_inner(heap);
}

/* Sanity-check that `block' belongs to `heap' -- presumably returns
   0 on success; confirm in heap.c. */
int xnheap_check_block(xnheap_t *heap,
		       void *block);
00250
00251 #ifdef __cplusplus
00252 }
00253 #endif
00254
00255 #endif
00256
00257 #define XNHEAP_DEV_NAME "/dev/rtheap"
00258
00259 #endif