Xenomai API 2.5.6.1
include/analogy/buffer.h
00001 
00023 #ifndef __ANALOGY_BUFFER_H__
00024 #define __ANALOGY_BUFFER_H__
00025 
00026 #ifndef DOXYGEN_CPP
00027 
00028 #ifdef __KERNEL__
00029 
00030 #include <linux/version.h>
00031 #include <linux/mm.h>
00032 
00033 #include <rtdm/rtdm_driver.h>
00034 
00035 #include <analogy/os_facilities.h>
00036 #include <analogy/context.h>
00037 
00038 /* --- Events bits / flags --- */
00039 
00040 #define A4L_BUF_EOBUF_NR 0
00041 #define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
00042 
00043 #define A4L_BUF_ERROR_NR 1
00044 #define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
00045 
00046 #define A4L_BUF_EOA_NR 2
00047 #define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
00048 
00049 /* --- Status bits / flags --- */
00050 
00051 #define A4L_BUF_BULK_NR 8
00052 #define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
00053 
00054 #define A4L_BUF_MAP_NR 9
00055 #define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
00056 
00057 struct a4l_subdevice;
00058 
00059 /* Buffer descriptor structure */
00060 struct a4l_buffer {
00061 
00062         /* Subdevice to which this buffer belongs */
00063         struct a4l_subdevice *subd;
00064 
00065         /* Buffer's first virtual page pointer */
00066         void *buf;
00067 
00068         /* Buffer's total size */
00069         unsigned long size;
00070         /* Array containing the buffer's page pointers */
00071         unsigned long *pg_list;
00072 
00073         /* RT/NRT synchronization element */
00074         a4l_sync_t sync;
00075 
00076         /* Counters needed for transfer */
00077         unsigned long end_count;
00078         unsigned long prd_count;
00079         unsigned long cns_count;
00080         unsigned long tmp_count;
00081 
00082         /* Status + events occurring during transfer */
00083         unsigned long flags;
00084 
00085         /* Command in progress */
00086         a4l_cmd_t *cur_cmd;
00087 
00088         /* Munge counter */
00089         unsigned long mng_count;
00090 };
00091 typedef struct a4l_buffer a4l_buf_t;
00092 
00093 /* --- Static inline functions related to
00094    user<->kernel data transfers --- */
00095 
00096 /* __produce copies data into the asynchronous buffer, looping so as
00097    to handle the non-contiguous (wrap-around) case. It is used in both
00098    the read and write paths */
00099 static inline int __produce(a4l_cxt_t *cxt,
00100                             a4l_buf_t *buf, void *pin, unsigned long count)
00101 {
00102         unsigned long start_ptr = (buf->prd_count % buf->size);
00103         unsigned long tmp_cnt = count;
00104         int ret = 0;
00105 
00106         while (ret == 0 && tmp_cnt != 0) {
00107                 /* Check the data copy can be performed contiguously */
00108                 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
00109                         buf->size - start_ptr : tmp_cnt;
00110 
00111                 /* Perform the copy */
00112                 if (cxt == NULL)
00113                         memcpy(buf->buf + start_ptr, pin, blk_size);
00114                 else
00115                         ret = rtdm_safe_copy_from_user(cxt->user_info,
00116                                                        buf->buf + start_ptr,
00117                                                        pin, blk_size);
00118 
00119                 /* Update pointers/counts */
00120                 pin += blk_size;
00121                 tmp_cnt -= blk_size;
00122                 start_ptr = 0;
00123         }
00124 
00125         return ret;
00126 }
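
/* Worked example (editor's illustration, not part of the original file):
   assume buf->size = 1024, buf->prd_count = 1000 and count = 100. The
   first pass finds start_ptr = 1000 and copies blk_size = 1024 - 1000 = 24
   bytes at the end of the ring; the second pass restarts at offset 0 and
   copies the remaining 76 bytes. The wrap-around is thus handled
   transparently; __consume below behaves symmetrically with cns_count. */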
00127 
00128 /* __consume copies data from the asynchronous buffer, looping so as
00129    to handle the non-contiguous (wrap-around) case. It is used in both
00130    the read and write paths */
00131 static inline int __consume(a4l_cxt_t *cxt,
00132                             a4l_buf_t *buf, void *pout, unsigned long count)
00133 {
00134         unsigned long start_ptr = (buf->cns_count % buf->size);
00135         unsigned long tmp_cnt = count;
00136         int ret = 0;
00137 
00138         while (ret == 0 && tmp_cnt != 0) {
00139                 /* Check the data copy can be performed contiguously */
00140                 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
00141                         buf->size - start_ptr : tmp_cnt;
00142 
00143                 /* Perform the copy */
00144                 if (cxt == NULL)
00145                         memcpy(pout, buf->buf + start_ptr, blk_size);
00146                 else
00147                         ret = rtdm_safe_copy_to_user(cxt->user_info,
00148                                                      pout,
00149                                                      buf->buf + start_ptr,
00150                                                      blk_size);
00151 
00152                 /* Update pointers/counts */
00153                 pout += blk_size;
00154                 tmp_cnt -= blk_size;
00155                 start_ptr = 0;
00156         }
00157 
00158         return ret;
00159 }
00160 
00161 /* __munge calls the subdevice-specific munge callback on contiguous
00162    windows within the whole buffer. It is used in both the read and
00163    write paths */
00164 static inline void __munge(struct a4l_subdevice * subd,
00165                            void (*munge) (struct a4l_subdevice *,
00166                                           void *, unsigned long),
00167                            a4l_buf_t * buf, unsigned long count)
00168 {
00169         unsigned long start_ptr = (buf->mng_count % buf->size);
00170         unsigned long tmp_cnt = count;
00171 
00172         while (tmp_cnt != 0) {
00173                 /* Check the data copy can be performed contiguously */
00174                 unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
00175                         buf->size - start_ptr : tmp_cnt;
00176 
00177                 /* Perform the munge operation */
00178                 munge(subd, buf->buf + start_ptr, blk_size);
00179 
00180                 /* Update the start pointer and the count */
00181                 tmp_cnt -= blk_size;
00182                 start_ptr = 0;
00183         }
00184 }
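
/* A minimal sketch (editor's addition) of a subdevice-specific munge
   callback suitable for __munge above. The function name and the 16-bit
   offset-binary to two's complement conversion are assumptions chosen only
   for illustration; a real driver matches its own sample format. */
static void example_munge(struct a4l_subdevice *subd,
                          void *buf, unsigned long size)
{
        unsigned short *samples = buf;
        unsigned long i;

        /* Convert each 16-bit sample of the contiguous window in place */
        for (i = 0; i < size / sizeof(*samples); i++)
                samples[i] ^= 0x8000;
}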
00185 
00186 /* The function __handle_event can only be called from process context
00187    (not from an interrupt service routine). It allows the client process
00188    to retrieve the buffer status which has been updated by the driver */
00189 static inline int __handle_event(a4l_buf_t * buf)
00190 {
00191         int ret = 0;
00192 
00193         /* The event "End of acquisition" must not be cleared
00194            before the buffer has been completely flushed */
00195         if (test_bit(A4L_BUF_EOA_NR, &buf->flags)) {
00196                 ret = -ENOENT;
00197         }
00198 
00199         if (test_bit(A4L_BUF_ERROR_NR, &buf->flags)) {
00200                 ret = -EPIPE;
00201         }
00202 
00203         return ret;
00204 }
00205 
00206 /* --- Counters management functions --- */
00207 
00208 /* Here, we may wonder why we need more than two counters / pointers.
00209 
00210    Theoretically, we only need two counters (or two pointers):
00211    - one which tells where the reader should be within the buffer
00212    - one which tells where the writer should be within the buffer
00213 
00214    With these two counters (or pointers), we just have to check that
00215    the writer does not overtake the reader inside the ring buffer
00216    BEFORE any read / write operations.
00217 
00218    However, if one element is a DMA controller, we have to be more
00219    careful. Generally a DMA transfer occurs like this:
00220    DMA shot
00221       |-> then DMA interrupt
00222          |-> then DMA soft handler which checks the counter
00223 
00224    So, the checks occur AFTER the write operations.
00225 
00226    Let's take an example: the reader is a software task and the writer
00227    is a DMA controller. At the end of the DMA shot, the write counter
00228    is higher than the read counter. Unfortunately, if a read operation
00229    occurs between the DMA shot and the DMA interrupt, the handler
00230    will not notice that an overflow occurred.
00231 
00232    That is why tmp_count comes into play: tmp_count records the
00233    current read/consumer counter before the next DMA shot; once that
00234    shot is done, we check that the updated writer/producer counter has
00235    not moved more than one buffer size past tmp_count. Thus we are sure
00236    that the DMA writer has not overtaken the reader, since it could not
00237    even overtake the position recorded at step n-1. */
00238 
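/* Worked example (editor's illustration): let buf->size = 100 and let
   tmp_count hold the consumer count (40) recorded at the previous prepare
   step. If the next shot is prepared with an absolute target count of 150,
   then 150 - 40 = 110 > 100: the writer would pass over data that had not
   been consumed when the previous shot was set up, so A4L_BUF_ERROR is
   raised and -EPIPE returned. With a target of 130, 130 - 40 = 90 <= 100,
   the prepare succeeds and tmp_count is refreshed with the current
   cns_count. */
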
00239 static inline int __pre_abs_put(a4l_buf_t * buf, unsigned long count)
00240 {
00241         if (count - buf->tmp_count > buf->size) {
00242                 set_bit(A4L_BUF_ERROR_NR, &buf->flags);
00243                 return -EPIPE;
00244         }
00245 
00246         buf->tmp_count = buf->cns_count;
00247 
00248         return 0;
00249 }
00250 
00251 static inline int __pre_put(a4l_buf_t * buf, unsigned long count)
00252 {
00253         return __pre_abs_put(buf, buf->tmp_count + count);
00254 }
00255 
00256 static inline int __pre_abs_get(a4l_buf_t * buf, unsigned long count)
00257 {
00258         /* The first time, we expect the buffer to be properly filled
00259         before the trigger occurrence; in any case, tmp_count must have
00260         been initialized, and it is updated at the out: label below */
00261         if (buf->tmp_count == 0 || buf->cns_count == 0)
00262                 goto out;
00263 
00264         /* At the end of the acquisition, the user application has
00265         written the defined amount of data into the buffer; so, on the
00266         last shot, the DMA channel may legitimately overtake the tmp
00267         frontier because no more data will come from user space;
00268         therefore no spurious alarm should be raised */
00269         if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
00270                 goto out;
00271 
00272         /* Once these exceptions have been handled, we check that the DMA
00273         transfer has not overtaken the last record of the production
00274         count (tmp_count was updated with prd_count the last time
00275         __pre_abs_get was called). Note that we cannot compare the
00276         current DMA count with the current production count because,
00277         even if the production count is higher than the DMA count right
00278         now, it does not mean that the DMA count was not greater a few
00279         cycles before; in such a case, the DMA channel would have
00280         retrieved the wrong data */
00281         if ((long)(count - buf->tmp_count) > 0) {
00282                 set_bit(A4L_BUF_ERROR_NR, &buf->flags);
00283                 return -EPIPE;
00284         }
00285 
00286 out:
00287         buf->tmp_count = buf->prd_count;
00288 
00289         return 0;
00290 }
00291 
00292 static inline int __pre_get(a4l_buf_t * buf, unsigned long count)
00293 {
00294         return __pre_abs_get(buf, buf->tmp_count + count);
00295 }
00296 
00297 static inline int __abs_put(a4l_buf_t * buf, unsigned long count)
00298 {
00299         unsigned long old = buf->prd_count;
00300 
00301         if ((long)(buf->prd_count - count) >= 0)
00302                 return -EINVAL;
00303 
00304         buf->prd_count = count;
00305 
00306         if ((old / buf->size) != (count / buf->size))
00307                 set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
00308 
00309         if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
00310                 set_bit(A4L_BUF_EOA_NR, &buf->flags);
00311 
00312         return 0;
00313 }
00314 
00315 static inline int __put(a4l_buf_t * buf, unsigned long count)
00316 {
00317         return __abs_put(buf, buf->prd_count + count);
00318 }
00319 
00320 static inline int __abs_get(a4l_buf_t * buf, unsigned long count)
00321 {
00322         unsigned long old = buf->cns_count;
00323 
00324         if ((long)(buf->cns_count - count) >= 0)
00325                 return -EINVAL;
00326 
00327         buf->cns_count = count;
00328 
00329         if ((old / buf->size) != count / buf->size)
00330                 set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
00331 
00332         if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
00333                 set_bit(A4L_BUF_EOA_NR, &buf->flags);
00334 
00335         return 0;
00336 }
00337 
00338 static inline int __get(a4l_buf_t * buf, unsigned long count)
00339 {
00340         return __abs_get(buf, buf->cns_count + count);
00341 }
00342 
00343 static inline unsigned long __count_to_put(a4l_buf_t * buf)
00344 {
00345         unsigned long ret;
00346 
00347         if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
00348                 ret = buf->size + buf->cns_count - buf->prd_count;
00349         else
00350                 ret = 0;
00351 
00352         return ret;
00353 }
00354 
00355 static inline unsigned long __count_to_get(a4l_buf_t * buf)
00356 {
00357         unsigned long ret;
00358 
00359         /* If the acquisition is unlimited (end_count == 0), we must
00360            not take end_count into account */
00361         if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
00362                 ret = buf->prd_count;
00363         else
00364                 ret = buf->end_count;
00365 
00366         if ((long)(ret - buf->cns_count) > 0)
00367                 ret -= buf->cns_count;
00368         else
00369                 ret = 0;
00370 
00371         return ret;
00372 }
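
/* Worked example (editor's illustration): with buf->size = 1000,
   prd_count = 1700, cns_count = 1200 and end_count = 0 (unlimited
   acquisition), __count_to_put() = 1000 + 1200 - 1700 = 500 bytes of free
   space and __count_to_get() = 1700 - 1200 = 500 bytes ready to be
   consumed. With end_count = 1600, __count_to_get() is clamped to
   1600 - 1200 = 400, since nothing beyond the end of the acquisition may
   be handed out. */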
00373 
00374 /* --- Buffer internal functions --- */
00375 
00376 int a4l_alloc_buffer(a4l_buf_t *buf_desc, int buf_size);
00377 
00378 void a4l_free_buffer(a4l_buf_t *buf_desc);
00379 
00380 void a4l_init_buffer(a4l_buf_t * buf_desc);
00381 
00382 void a4l_cleanup_buffer(a4l_buf_t * buf_desc);
00383 
00384 int a4l_setup_buffer(a4l_cxt_t *cxt, a4l_cmd_t *cmd);
00385 
00386 int a4l_cancel_buffer(a4l_cxt_t *cxt);
00387 
00388 int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
00389                            unsigned long count);
00390 
00391 int a4l_buf_commit_absput(struct a4l_subdevice *subd,
00392                           unsigned long count);
00393 
00394 int a4l_buf_prepare_put(struct a4l_subdevice *subd,
00395                         unsigned long count);
00396 
00397 int a4l_buf_commit_put(struct a4l_subdevice *subd,
00398                        unsigned long count);
00399 
00400 int a4l_buf_put(struct a4l_subdevice *subd,
00401                 void *bufdata, unsigned long count);
00402 
00403 int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
00404                            unsigned long count);
00405 
00406 int a4l_buf_commit_absget(struct a4l_subdevice *subd,
00407                           unsigned long count);
00408 
00409 int a4l_buf_prepare_get(struct a4l_subdevice *subd,
00410                         unsigned long count);
00411 
00412 int a4l_buf_commit_get(struct a4l_subdevice *subd,
00413                        unsigned long count);
00414 
00415 int a4l_buf_get(struct a4l_subdevice *subd,
00416                 void *bufdata, unsigned long count);
00417 
00418 int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
00419 
00420 unsigned long a4l_buf_count(struct a4l_subdevice *subd);
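
/* A minimal sketch (editor's addition) of how a driver's acquisition
   handler might feed the buffer with the helpers declared above. Only
   a4l_buf_put(), a4l_buf_evt() and A4L_BUF_ERROR come from this header;
   the function name and the assumption that evts == 0 simply wakes up
   sleeping readers are illustrative. */
static void example_push_samples(struct a4l_subdevice *subd,
                                 void *samples, unsigned long nbytes)
{
        /* Copy the freshly acquired samples into the asynchronous buffer */
        int err = a4l_buf_put(subd, samples, nbytes);

        /* Notify any waiter; report an overrun if the put failed */
        a4l_buf_evt(subd, err < 0 ? A4L_BUF_ERROR : 0);
}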
00421 
00422 /* --- Current Command management function --- */
00423 
00424 static inline a4l_cmd_t *a4l_get_cmd(a4l_subd_t *subd)
00425 {
00426         return (subd->buf) ? subd->buf->cur_cmd : NULL;
00427 }
00428 
00429 /* --- Munge related function --- */
00430 
00431 int a4l_get_chan(struct a4l_subdevice *subd);
00432 
00433 /* --- IOCTL / FOPS functions --- */
00434 
00435 int a4l_ioctl_mmap(a4l_cxt_t * cxt, void *arg);
00436 int a4l_ioctl_bufcfg(a4l_cxt_t * cxt, void *arg);
00437 int a4l_ioctl_bufinfo(a4l_cxt_t * cxt, void *arg);
00438 int a4l_ioctl_poll(a4l_cxt_t * cxt, void *arg);
00439 ssize_t a4l_read_buffer(a4l_cxt_t * cxt, void *bufdata, size_t nbytes);
00440 ssize_t a4l_write_buffer(a4l_cxt_t * cxt, const void *bufdata, size_t nbytes);
00441 int a4l_select(a4l_cxt_t *cxt,
00442                rtdm_selector_t *selector,
00443                enum rtdm_selecttype type, unsigned fd_index);
00444 
00445 #endif /* __KERNEL__ */
00446 
00447 /* MMAP ioctl argument structure */
00448 struct a4l_mmap_arg {
00449         unsigned int idx_subd;
00450         unsigned long size;
00451         void *ptr;
00452 };
00453 typedef struct a4l_mmap_arg a4l_mmap_t;
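
/* A minimal user-space sketch (editor's addition) of how the MMAP argument
   above might be used to map the asynchronous buffer into the application's
   address space. The request code A4L_MMAP and the assumption that the
   driver returns the mapping address through the ptr field are not
   guaranteed by this header; rt_dev_ioctl() is the RTDM user-space call. */
#include <rtdm/rtdm.h>

static int example_map_buffer(int fd, unsigned long size, void **map)
{
        a4l_mmap_t arg = {
                .idx_subd = 0, /* subdevice running the acquisition */
                .size = size,  /* should match the configured buffer size */
                .ptr = NULL,
        };
        int err = rt_dev_ioctl(fd, A4L_MMAP, &arg);

        if (err == 0)
                *map = arg.ptr; /* filled in by the driver on success */

        return err;
}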
00454 
00455 /* Constants related to the buffer size
00456    (may be used with the BUFCFG ioctl) */
00457 #define A4L_BUF_MAXSIZE 0x1000000
00458 #define A4L_BUF_DEFSIZE 0x10000
00459 #define A4L_BUF_DEFMAGIC 0xffaaff55
00460 
00461 /* BUFCFG ioctl argument structure */
00462 struct a4l_buffer_config {
00463         /* NOTE: with the latest buffer implementation, the field
00464            idx_subd became useless; buffers are now allocated
00465            per-context. So, the buffer size configuration is specific
00466            to an opened device. There is one small exception: a default
00467            buffer size can be defined per device.
00468            So far, a hack is used to implement the configuration of
00469            the default buffer size */
00470         unsigned int idx_subd;
00471         unsigned long buf_size;
00472 };
00473 typedef struct a4l_buffer_config a4l_bufcfg_t;
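
/* A minimal user-space sketch (editor's addition) of how the BUFCFG
   argument above might be used to resize the buffer of an opened Analogy
   device. The request code A4L_BUFCFG is assumed to come from the Analogy
   ioctl definitions (it is not defined in this header); rt_dev_ioctl() is
   the RTDM user-space call. */
#include <rtdm/rtdm.h>

static int example_resize_buffer(int fd, unsigned long size)
{
        a4l_bufcfg_t cfg = {
                .idx_subd = 0,    /* ignored with per-context buffers */
                .buf_size = size, /* should not exceed A4L_BUF_MAXSIZE */
        };

        return rt_dev_ioctl(fd, A4L_BUFCFG, &cfg);
}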
00474 
00475 /* BUFINFO ioctl argument structure */
00476 struct a4l_buffer_info {
00477         unsigned int idx_subd;
00478         unsigned long buf_size;
00479         unsigned long rw_count;
00480 };
00481 typedef struct a4l_buffer_info a4l_bufinfo_t;
00482 
00483 /* POLL ioctl argument structure */
00484 struct a4l_poll {
00485         unsigned int idx_subd;
00486         unsigned long arg;
00487 };
00488 typedef struct a4l_poll a4l_poll_t;
00489 
00490 #endif /* !DOXYGEN_CPP */
00491 
00492 #endif /* __ANALOGY_BUFFER_H__ */