Xenomai API 2.5.6.1
include/nucleus/heap.h
00001 /*
00002  * @note Copyright (C) 2001,2002,2003 Philippe Gerum <[email protected]>.
00003  *
00004  * Xenomai is free software; you can redistribute it and/or modify
00005  * it under the terms of the GNU General Public License as published
00006  * by the Free Software Foundation; either version 2 of the License,
00007  * or (at your option) any later version.
00008  *
00009  * Xenomai is distributed in the hope that it will be useful, but
00010  * WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00012  * General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU General Public License
00015  * along with Xenomai; if not, write to the Free Software
00016  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
00017  * 02111-1307, USA.
00018  *
00019  * \ingroup heap
00020  */
00021 
00022 #ifndef _XENO_NUCLEUS_HEAP_H
00023 #define _XENO_NUCLEUS_HEAP_H
00024 
00025 #include <nucleus/queue.h>
00026 
00027 /*
00028  * CONSTRAINTS:
00029  *
00030  * Minimum page size is 2 ** XNHEAP_MINLOG2 (must be large enough to
00031  * hold a pointer).
00032  *
00033  * Maximum page size is 2 ** XNHEAP_MAXLOG2.
00034  *
00035  * Minimum block size equals the minimum page size.
00036  *
00037  * A requested block size smaller than the minimum block size is
00038  * rounded up to the minimum block size.
00039  *
00040  * A requested block size larger than twice the page size is rounded
00041  * up to the next page boundary and obtained from the free page
00042  * list. So we need a bucket for each power of two between
00043  * XNHEAP_MINLOG2 and XNHEAP_MAXLOG2 inclusive, plus one to honor
00044  * requests ranging from the maximum page size to twice this size.
00045  */
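With the values defined just below (XNHEAP_MINLOG2 == 3, XNHEAP_MAXLOG2 == 22), this scheme yields XNHEAP_NBUCKETS == 22 - 3 + 2 == 21 buckets. The sketch below only restates the rounding rules listed above; it is not part of this header, the helper name is made up, and the real allocator in heap.c applies additional alignment rules (e.g. XNHEAP_MINALIGNSZ).

/* Illustrative only: how a request size maps onto the constraints above. */
static size_t sketch_round_request(size_t size, size_t pagesize)
{
        size_t blksz = 1 << XNHEAP_MINLOG2;     /* Minimum block size, 8 bytes. */

        if (size < blksz)
                return blksz;   /* Rounded up to the minimum block size. */

        if (size <= 2 * pagesize) {
                /* Served from one of the power-of-two buckets
                   (including the extra pagesize..2*pagesize bucket). */
                while (blksz < size)
                        blksz <<= 1;
                return blksz;
        }

        /* Larger requests are rounded up to the next page boundary
           and taken from the free page list. */
        return (size + pagesize - 1) & ~(pagesize - 1);
}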
00046 
00047 #if defined(__KERNEL__) || defined(__XENO_SIM__)
00048 
00049 #define XNHEAP_PAGE_SIZE        512 /* A reasonable value for the xnheap page size */
00050 #define XNHEAP_PAGE_MASK        (~(XNHEAP_PAGE_SIZE-1))
00051 #define XNHEAP_PAGE_ALIGN(addr) (((addr)+XNHEAP_PAGE_SIZE-1)&XNHEAP_PAGE_MASK)
00052 
00053 #define XNHEAP_MINLOG2    3
00054 #define XNHEAP_MAXLOG2    22    /* Must hold pagemap::bcount objects */
00055 #define XNHEAP_MINALLOCSZ (1 << XNHEAP_MINLOG2)
00056 #define XNHEAP_MINALIGNSZ (1 << 4) /* i.e. 16 bytes */
00057 #define XNHEAP_NBUCKETS   (XNHEAP_MAXLOG2 - XNHEAP_MINLOG2 + 2)
00058 #define XNHEAP_MAXEXTSZ   (1 << 31) /* i.e. 2 GiB */
00059 
00060 #define XNHEAP_PFREE   0
00061 #define XNHEAP_PCONT   1
00062 #define XNHEAP_PLIST   2
00063 
00064 #define XNHEAP_GFP_NONCACHED (1 << __GFP_BITS_SHIFT)
00065 
00066 struct xnpagemap {
00067         unsigned int type : 8;    /* PFREE, PCONT, PLIST or log2 */
00068         unsigned int bcount : 24; /* Number of active blocks. */
00069 };
00070 
00071 typedef struct xnextent {
00072 
00073         xnholder_t link;
00074 
00075 #define link2extent(ln) container_of(ln, xnextent_t, link)
00076 
00077         caddr_t membase,        /* Base address of the page array */
00078                 memlim,         /* Memory limit of page array */
00079                 freelist;       /* Head of the free page list */
00080 
00081         struct xnpagemap pagemap[1];    /* Beginning of page map */
00082 
00083 } xnextent_t;
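The pagemap[1] member is the pre-C99 idiom for a trailing variable-length array: each extent is over-allocated so that one struct xnpagemap entry per page follows the header. Pieced together from the overhead helpers further down, an extent is laid out roughly as follows (illustrative diagram, not part of the header):

/*
 *  extent base            membase                              memlim
 *  |                      |                                    |
 *  v                      v                                    v
 *  +----------------------+------------------------------------+
 *  |  struct xnextent     |  page 0 | page 1 | ... | page N-1  |
 *  |  + pagemap[N]        |  (free pages are chained through   |
 *  |  (page-aligned)      |   their first word via freelist)   |
 *  +----------------------+------------------------------------+
 */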
00084 
00085 typedef struct xnheap {
00086 
00087         xnholder_t link;
00088 
00089 #define link2heap(ln)           container_of(ln, xnheap_t, link)
00090 
00091         u_long extentsize,
00092                 pagesize,
00093                 pageshift,
00094                 hdrsize,
00095                 npages,         /* Number of pages per extent */
00096                 ubytes,
00097                 maxcont;
00098 
00099         xnqueue_t extents;
00100 
00101         DECLARE_XNLOCK(lock);
00102 
00103         struct xnbucket {
00104                 caddr_t freelist;
00105                 int fcount;
00106         } buckets[XNHEAP_NBUCKETS];
00107 
00108         xnholder_t *idleq[XNARCH_NR_CPUS];
00109 
00110         xnarch_heapcb_t archdep;
00111 
00112         XNARCH_DECL_DISPLAY_CONTEXT();
00113 
00114         xnholder_t stat_link;   /* Link in heapq */
00115 
00116         char label[XNOBJECT_NAME_LEN+16];
00117 
00118 } xnheap_t;
00119 
00120 extern xnheap_t kheap;
00121 
00122 #if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
00123 extern xnheap_t kstacks;
00124 #endif
00125 
00126 #define xnheap_extentsize(heap)         ((heap)->extentsize)
00127 #define xnheap_page_size(heap)          ((heap)->pagesize)
00128 #define xnheap_page_count(heap)         ((heap)->npages)
00129 #define xnheap_usable_mem(heap)         ((heap)->maxcont * countq(&(heap)->extents))
00130 #define xnheap_used_mem(heap)           ((heap)->ubytes)
00131 #define xnheap_max_contiguous(heap)     ((heap)->maxcont)
00132 
00133 static inline size_t xnheap_align(size_t size, size_t al)
00134 {
00135         /* The alignment value must be a power of 2 */
00136         return ((size+al-1)&(~(al-1)));
00137 }
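For instance, with a 16-byte alignment (the value of XNHEAP_MINALIGNSZ):

xnheap_align(100, 16);  /* == (100 + 15) & ~15 == 112 */
xnheap_align(112, 16);  /* == 112, already aligned */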
00138 
00139 static inline size_t xnheap_external_overhead(size_t hsize, size_t psize)
00140 {
00141         size_t pages = (hsize + psize - 1) / psize;
00142         return xnheap_align(sizeof(xnextent_t)
00143                             + pages * sizeof(struct xnpagemap), psize);
00144 }
00145 
00146 static inline size_t xnheap_internal_overhead(size_t hsize, size_t psize)
00147 {
00148         /* o = (h - o) * m / p + e
00149            o * p = (h - o) * m + e * p
00150            o * (p + m) = h * m + e * p
00151            o = (h * m + e * p) / (p + m)
00152         */
00153         return xnheap_align((sizeof(xnextent_t) * psize
00154                              + sizeof(struct xnpagemap) * hsize)
00155                             / (psize + sizeof(struct xnpagemap)), psize);
00156 }
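In the derivation above, o is the resulting overhead, h the heap size (hsize), p the page size (psize), m = sizeof(struct xnpagemap) and e = sizeof(xnextent_t). As a worked example, assuming build-dependent values of 4 bytes per pagemap entry and 48 bytes for the extent header (both figures are assumptions, not guarantees):

/* Hypothetical figures: 64 KiB heap, 512-byte pages.
 *
 *   pages             = 65536 / 512 = 128
 *   external overhead = xnheap_align(48 + 128 * 4, 512)
 *                     = xnheap_align(560, 512) = 1024 bytes
 *   internal overhead = xnheap_align((48 * 512 + 4 * 65536) / (512 + 4), 512)
 *                     = xnheap_align(555, 512) = 1024 bytes
 */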
00157 
00158 #define xnmalloc(size)     xnheap_alloc(&kheap,size)
00159 #define xnfree(ptr)        xnheap_free(&kheap,ptr)
00160 #define xnfreesync()       xnheap_finalize_free(&kheap)
00161 #define xnfreesafe(thread, ptr, ln)                             \
00162         do {                                                    \
00163                 if (xnpod_current_p(thread))                    \
00164                         xnheap_schedule_free(&kheap, ptr, ln);  \
00165                 else                                            \
00166                         xnheap_free(&kheap,ptr);                \
00167         } while(0)
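xnfreesafe() defers the release through xnheap_schedule_free() when 'thread' is the thread currently running, in which case the block might still be in use, and frees the block immediately otherwise. A call site might look like the line below, where obj and its link member are hypothetical names:

xnfreesafe(thread, obj, &obj->link);    /* safe even when 'thread' is current */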
00168 
00169 static inline size_t xnheap_rounded_size(size_t hsize, size_t psize)
00170 {
00171         /*
00172          * Account for the minimum heap size (i.e. 2 * page size) plus
00173          * overhead so that the actual heap space is large enough to
00174          * match the requested size. Using a small page size for large
00175          * single-block heaps might reserve a lot of useless page map
00176          * memory, but this should never get pathological anyway,
00177          * since we only consume 4 bytes per page.
00178          */
00179         if (hsize < 2 * psize)
00180                 hsize = 2 * psize;
00181         hsize += xnheap_external_overhead(hsize, psize);
00182         return xnheap_align(hsize, psize);
00183 }
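Continuing the worked example from above (64 KiB requested, 512-byte pages, ~1 KiB of external overhead under the same sizeof assumptions):

/* xnheap_rounded_size(64 * 1024, 512)
 *   == xnheap_align(65536 + 1024, 512)
 *   == 66560 bytes actually reserved
 */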
00184 
00185 #ifdef __cplusplus
00186 extern "C" {
00187 #endif
00188 
00189 /* Private interface. */
00190 
00191 #ifdef __KERNEL__
00192 
00193 int xnheap_mount(void);
00194 
00195 void xnheap_umount(void);
00196 
00197 void xnheap_init_proc(void);
00198 
00199 void xnheap_cleanup_proc(void);
00200 
00201 int xnheap_init_mapped(xnheap_t *heap,
00202                        u_long heapsize,
00203                        int memflags);
00204 
00205 void xnheap_destroy_mapped(xnheap_t *heap,
00206                            void (*release)(struct xnheap *heap),
00207                            void __user *mapaddr);
00208 
00209 #define xnheap_base_memory(heap) \
00210         ((caddr_t)(heap)->archdep.heapbase)
00211 
00212 #define xnheap_mapped_offset(heap,ptr) \
00213         (((caddr_t)(ptr)) - xnheap_base_memory(heap))
00214 
00215 #define xnheap_mapped_address(heap,off) \
00216         (xnheap_base_memory(heap) + (off))
00217 
00218 #define xnheap_mapped_p(heap) \
00219         (xnheap_base_memory(heap) != NULL)
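These helpers translate between kernel addresses within a mapped heap and offsets relative to its base, typically so that user space can apply the same offsets to its own mapping of the area. The translation round-trips, as the illustrative identity below shows:

/* For any block pointer p inside the heap (illustrative):
 *   xnheap_mapped_address(heap, xnheap_mapped_offset(heap, p)) == (caddr_t)p
 */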
00220 
00221 #endif /* __KERNEL__ */
00222 
00223 /* Public interface. */
00224 
00225 int xnheap_init(xnheap_t *heap,
00226                 void *heapaddr,
00227                 u_long heapsize,
00228                 u_long pagesize);
00229 
00230 void xnheap_set_label(xnheap_t *heap, const char *name, ...);
00231 
00232 void xnheap_destroy(xnheap_t *heap,
00233                     void (*flushfn)(xnheap_t *heap,
00234                                     void *extaddr,
00235                                     u_long extsize,
00236                                     void *cookie),
00237                     void *cookie);
00238 
00239 int xnheap_extend(xnheap_t *heap,
00240                   void *extaddr,
00241                   u_long extsize);
00242 
00243 void *xnheap_alloc(xnheap_t *heap,
00244                    u_long size);
00245 
00246 int xnheap_test_and_free(xnheap_t *heap,
00247                          void *block,
00248                          int (*ckfn)(void *block));
00249 
00250 int xnheap_free(xnheap_t *heap,
00251                 void *block);
00252 
00253 void xnheap_schedule_free(xnheap_t *heap,
00254                           void *block,
00255                           xnholder_t *link);
00256 
00257 void xnheap_finalize_free_inner(xnheap_t *heap,
00258                                 int cpu);
00259 
00260 static inline void xnheap_finalize_free(xnheap_t *heap)
00261 {
00262         int cpu = xnarch_current_cpu();
00263 
00264         XENO_ASSERT(NUCLEUS,
00265                     spltest() != 0,
00266                     xnpod_fatal("%s called in unsafe context", __FUNCTION__));
00267 
00268         if (heap->idleq[cpu])
00269                 xnheap_finalize_free_inner(heap, cpu);
00270 }
00271 
00272 int xnheap_check_block(xnheap_t *heap,
00273                        void *block);
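Putting the public interface together, a minimal life cycle for a private kernel heap could look like the sketch below. It is not taken from Xenomai itself: the names are made up, error handling is trimmed, and kmalloc() is just one possible way to obtain the backing memory.

static xnheap_t myheap;                 /* hypothetical heap descriptor */

static void myheap_flush(xnheap_t *heap, void *extaddr,
                         u_long extsize, void *cookie)
{
        kfree(extaddr);                 /* give each extent back to the kernel */
}

static int myheap_setup(void)
{
        u_long size = xnheap_rounded_size(64 * 1024, XNHEAP_PAGE_SIZE);
        void *mem = kmalloc(size, GFP_KERNEL);
        int ret;

        if (mem == NULL)
                return -ENOMEM;

        ret = xnheap_init(&myheap, mem, size, XNHEAP_PAGE_SIZE);
        if (ret) {
                kfree(mem);
                return ret;
        }

        xnheap_set_label(&myheap, "my-heap");
        return 0;
}

/* ... later: xnheap_alloc(&myheap, len) / xnheap_free(&myheap, block) ... */

static void myheap_cleanup(void)
{
        xnheap_destroy(&myheap, myheap_flush, NULL);
}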
00274 
00275 #ifdef __cplusplus
00276 }
00277 #endif
00278 
00279 #endif /* __KERNEL__ || __XENO_SIM__ */
00280 
00281 #define XNHEAP_DEV_NAME  "/dev/rtheap"
00282 #define XNHEAP_DEV_MINOR 254
00283 
00284 #ifdef CONFIG_MMU
00285 /* XXX: 2.5.x ABI preserved for MMU-enabled only. */
00286 #define xnheap_area_decl();
00287 #define xnheap_area_set(p, val)
00288 #else
00289 #define xnheap_area_decl()      unsigned long area
00290 #define xnheap_area_set(p, val) (p)->area = (unsigned long)(val)
00291 #endif
00292 
00293 struct xnheap_desc {
00294         unsigned long handle;
00295         unsigned int size;
00296         xnheap_area_decl();
00297 };
00298 
00299 #endif /* !_XENO_NUCLEUS_HEAP_H */