/*
 * Xenomai API 2.6.5 — buffer.h
 * (Doxygen-generated source listing of this file.)
 */
1 
23 #ifndef __ANALOGY_BUFFER_H__
24 #define __ANALOGY_BUFFER_H__
25 
26 #ifndef DOXYGEN_CPP
27 
28 #ifdef __KERNEL__
29 
30 #include <linux/version.h>
31 #include <linux/mm.h>
32 
33 #include <rtdm/rtdm_driver.h>
34 
35 #include <analogy/os_facilities.h>
36 #include <analogy/context.h>
37 
38 /* --- Events bits / flags --- */
39 
40 #define A4L_BUF_EOBUF_NR 0
41 #define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
42 
43 #define A4L_BUF_ERROR_NR 1
44 #define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
45 
46 #define A4L_BUF_EOA_NR 2
47 #define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
48 
49 /* --- Status bits / flags --- */
50 
51 #define A4L_BUF_BULK_NR 8
52 #define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
53 
54 #define A4L_BUF_MAP_NR 9
55 #define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
56 
57 struct a4l_subdevice;
58 
/* Buffer descriptor: ring buffer used for asynchronous acquisition
   transfers between a device driver and user space. */
struct a4l_buffer {

	/* Owning subdevice (back-pointer added by the structure update) */
	struct a4l_subdevice *subd;

	/* Buffer's first virtual page pointer */
	void *buf;

	/* Buffer's global size, in bytes */
	unsigned long size;
	/* Tab containing buffer's pages pointers
	   -- NOTE(review): presumably one entry per page, used for
	   mmap support; confirm against the allocation code */
	unsigned long *pg_list;

	/* RT/NRT synchronization element */
	a4l_sync_t sync;

	/* Counters needed for transfer (monotonic byte counts since
	   the start of the acquisition):
	   - end_count: total amount to transfer, 0 for an endless
	     acquisition
	   - prd_count: bytes produced (written into the buffer)
	   - cns_count: bytes consumed (read out of the buffer)
	   - tmp_count: snapshot of the opposite counter, used to
	     detect DMA overruns (see the long comment above
	     __pre_abs_put) */
	unsigned long end_count;
	unsigned long prd_count;
	unsigned long cns_count;
	unsigned long tmp_count;

	/* Status + events occurring during transfer (A4L_BUF_* bits) */
	unsigned long flags;

	/* Command in progress, NULL when idle */
	a4l_cmd_t *cur_cmd;

	/* Munge counter: bytes already passed through the subdevice's
	   munge callback */
	unsigned long mng_count;

	/* Threshold below which the user process should not be
	   awakened */
	unsigned long wake_count;
};
typedef struct a4l_buffer a4l_buf_t;
96 
97 /* --- Static inline functions related with
98  user<->kernel data transfers --- */
99 
100 /* The function __produce is an inline function which copies data into
101  the asynchronous buffer and takes care of the non-contiguous issue
102  when looping. This function is used in read and write operations */
103 static inline int __produce(a4l_cxt_t *cxt,
104  a4l_buf_t *buf, void *pin, unsigned long count)
105 {
106  unsigned long start_ptr = (buf->prd_count % buf->size);
107  unsigned long tmp_cnt = count;
108  int ret = 0;
109 
110  while (ret == 0 && tmp_cnt != 0) {
111  /* Check the data copy can be performed contiguously */
112  unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
113  buf->size - start_ptr : tmp_cnt;
114 
115  /* Perform the copy */
116  if (cxt == NULL)
117  memcpy(buf->buf + start_ptr, pin, blk_size);
118  else
119  ret = rtdm_safe_copy_from_user(cxt->user_info,
120  buf->buf + start_ptr,
121  pin, blk_size);
122 
123  /* Update pointers/counts */
124  pin += blk_size;
125  tmp_cnt -= blk_size;
126  start_ptr = 0;
127  }
128 
129  return ret;
130 }
131 
132 /* The function __consume is an inline function which copies data from
133  the asynchronous buffer and takes care of the non-contiguous issue
134  when looping. This function is used in read and write operations */
135 static inline int __consume(a4l_cxt_t *cxt,
136  a4l_buf_t *buf, void *pout, unsigned long count)
137 {
138  unsigned long start_ptr = (buf->cns_count % buf->size);
139  unsigned long tmp_cnt = count;
140  int ret = 0;
141 
142  while (ret == 0 && tmp_cnt != 0) {
143  /* Check the data copy can be performed contiguously */
144  unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
145  buf->size - start_ptr : tmp_cnt;
146 
147  /* Perform the copy */
148  if (cxt == NULL)
149  memcpy(pout, buf->buf + start_ptr, blk_size);
150  else
151  ret = rtdm_safe_copy_to_user(cxt->user_info,
152  pout,
153  buf->buf + start_ptr,
154  blk_size);
155 
156  /* Update pointers/counts */
157  pout += blk_size;
158  tmp_cnt -= blk_size;
159  start_ptr = 0;
160  }
161 
162  return ret;
163 }
164 
165 /* The function __munge is an inline function which calls the
166  subdevice specific munge callback on contiguous windows within the
167  whole buffer. This function is used in read and write operations */
168 static inline void __munge(struct a4l_subdevice * subd,
169  void (*munge) (struct a4l_subdevice *,
170  void *, unsigned long),
171  a4l_buf_t * buf, unsigned long count)
172 {
173  unsigned long start_ptr = (buf->mng_count % buf->size);
174  unsigned long tmp_cnt = count;
175 
176  while (tmp_cnt != 0) {
177  /* Check the data copy can be performed contiguously */
178  unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
179  buf->size - start_ptr : tmp_cnt;
180 
181  /* Perform the munge operation */
182  munge(subd, buf->buf + start_ptr, blk_size);
183 
184  /* Update the start pointer and the count */
185  tmp_cnt -= blk_size;
186  start_ptr = 0;
187  }
188 }
189 
190 /* The function __handle_event can only be called from process context
191  (not interrupt service routine). It allows the client process to
192  retrieve the buffer status which has been updated by the driver */
193 static inline int __handle_event(a4l_buf_t * buf)
194 {
195  int ret = 0;
196 
197  /* The event "End of acquisition" must not be cleaned
198  before the complete flush of the buffer */
199  if (test_bit(A4L_BUF_EOA_NR, &buf->flags)) {
200  ret = -ENOENT;
201  }
202 
203  if (test_bit(A4L_BUF_ERROR_NR, &buf->flags)) {
204  ret = -EPIPE;
205  }
206 
207  return ret;
208 }
209 
210 /* --- Counters management functions --- */
211 
212 /* Here, we may wonder why we need more than two counters / pointers.
213 
214  Theoretically, we only need two counters (or two pointers):
215  - one which tells where the reader should be within the buffer
216  - one which tells where the writer should be within the buffer
217 
218  With these two counters (or pointers), we just have to check that
219  the writer does not overtake the reader inside the ring buffer
220  BEFORE any read / write operations.
221 
222  However, if one element is a DMA controller, we have to be more
223  careful. Generally a DMA transfer occurs like this:
224  DMA shot
225  |-> then DMA interrupt
226  |-> then DMA soft handler which checks the counter
227 
228  So, the checkings occur AFTER the write operations.
229 
230  Let's take an example: the reader is a software task and the writer
231  is a DMA controller. At the end of the DMA shot, the write counter
232  is higher than the read counter. Unfortunately, a read operation
233  occurs between the DMA shot and the DMA interrupt, so the handler
234  will not notice that an overflow occured.
235 
236  That is why tmp_count comes into play: tmp_count records the
237  read/consumer current counter before the next DMA shot and once the
238  next DMA shot is done, we check that the updated writer/producer
239  counter is not higher than tmp_count. Thus we are sure that the DMA
240  writer has not overtaken the reader because it was not able to
241  overtake the n-1 value. */
242 
243 static inline int __pre_abs_put(a4l_buf_t * buf, unsigned long count)
244 {
245  if (count - buf->tmp_count > buf->size) {
246  set_bit(A4L_BUF_ERROR_NR, &buf->flags);
247  return -EPIPE;
248  }
249 
250  buf->tmp_count = buf->cns_count;
251 
252  return 0;
253 }
254 
255 static inline int __pre_put(a4l_buf_t * buf, unsigned long count)
256 {
257  return __pre_abs_put(buf, buf->tmp_count + count);
258 }
259 
/* Checks, before a consumer (e.g. DMA output) update to the absolute
   count "count", that the reader has not overtaken the producer
   position snapshotted at the previous call; on overrun the error bit
   is raised and -EPIPE returned. See the long comment above about why
   tmp_count is needed. */
static inline int __pre_abs_get(a4l_buf_t * buf, unsigned long count)
{
	/* The first time, we expect the buffer to be properly filled
	   before the trigger occurrence; by the way, we need tmp_count
	   to have been initialized and tmp_count is updated right here */
	if (buf->tmp_count == 0 || buf->cns_count == 0)
		goto out;

	/* At the end of the acquisition, the user application has
	   written the defined amount of data into the buffer; so the
	   last time, the DMA channel can easily overtake the tmp
	   frontier because no more data were sent from user space;
	   therefore no useless alarm should be sent */
	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
		goto out;

	/* Once the exceptions are passed, we check that the DMA
	   transfer has not overtaken the last record of the production
	   count (tmp_count was updated with prd_count the last time
	   __pre_abs_get was called). We must understand that we cannot
	   compare the current DMA count with the current production
	   count because even if, right now, the production count is
	   higher than the DMA count, it does not mean that the DMA count
	   was not greater a few cycles before; in such case, the DMA
	   channel would have retrieved the wrong data */
	if ((long)(count - buf->tmp_count) > 0) {
		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
		return -EPIPE;
	}

out:
	/* Snapshot the producer count for the next overrun check */
	buf->tmp_count = buf->prd_count;

	return 0;
}
295 
296 static inline int __pre_get(a4l_buf_t * buf, unsigned long count)
297 {
298  return __pre_abs_get(buf, buf->tmp_count + count);
299 }
300 
301 static inline int __abs_put(a4l_buf_t * buf, unsigned long count)
302 {
303  unsigned long old = buf->prd_count;
304 
305  if ((long)(buf->prd_count - count) >= 0)
306  return -EINVAL;
307 
308  buf->prd_count = count;
309 
310  if ((old / buf->size) != (count / buf->size))
311  set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
312 
313  if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
314  set_bit(A4L_BUF_EOA_NR, &buf->flags);
315 
316  return 0;
317 }
318 
319 static inline int __put(a4l_buf_t * buf, unsigned long count)
320 {
321  return __abs_put(buf, buf->prd_count + count);
322 }
323 
324 static inline int __abs_get(a4l_buf_t * buf, unsigned long count)
325 {
326  unsigned long old = buf->cns_count;
327 
328  if ((long)(buf->cns_count - count) >= 0)
329  return -EINVAL;
330 
331  buf->cns_count = count;
332 
333  if ((old / buf->size) != count / buf->size)
334  set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
335 
336  if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
337  set_bit(A4L_BUF_EOA_NR, &buf->flags);
338 
339  return 0;
340 }
341 
342 static inline int __get(a4l_buf_t * buf, unsigned long count)
343 {
344  return __abs_get(buf, buf->cns_count + count);
345 }
346 
347 static inline unsigned long __count_to_put(a4l_buf_t * buf)
348 {
349  unsigned long ret;
350 
351  if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
352  ret = buf->size + buf->cns_count - buf->prd_count;
353  else
354  ret = 0;
355 
356  return ret;
357 }
358 
359 static inline unsigned long __count_to_get(a4l_buf_t * buf)
360 {
361  unsigned long ret;
362 
363  /* If the acquisition is unlimited (end_count == 0), we must
364  not take into account end_count */
365  if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
366  ret = buf->prd_count;
367  else
368  ret = buf->end_count;
369 
370  if ((long)(ret - buf->cns_count) > 0)
371  ret -= buf->cns_count;
372  else
373  ret = 0;
374 
375  return ret;
376 }
377 
378 static inline unsigned long __count_to_end(a4l_buf_t * buf)
379 {
380  unsigned long ret = buf->end_count - buf->cns_count;
381 
382  if (buf->end_count == 0)
383  return ULONG_MAX;
384 
385  return ((long)ret) < 0 ? 0 : ret;
386 }
387 
388 /* --- Buffer internal functions --- */
389 
390 int a4l_alloc_buffer(a4l_buf_t *buf_desc, int buf_size);
391 
392 void a4l_free_buffer(a4l_buf_t *buf_desc);
393 
394 void a4l_init_buffer(a4l_buf_t * buf_desc);
395 
396 void a4l_cleanup_buffer(a4l_buf_t * buf_desc);
397 
398 int a4l_setup_buffer(a4l_cxt_t *cxt, a4l_cmd_t *cmd);
399 
400 int a4l_cancel_buffer(a4l_cxt_t *cxt);
401 
402 int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
403  unsigned long count);
404 
405 int a4l_buf_commit_absput(struct a4l_subdevice *subd,
406  unsigned long count);
407 
408 int a4l_buf_prepare_put(struct a4l_subdevice *subd,
409  unsigned long count);
410 
411 int a4l_buf_commit_put(struct a4l_subdevice *subd,
412  unsigned long count);
413 
414 int a4l_buf_put(struct a4l_subdevice *subd,
415  void *bufdata, unsigned long count);
416 
417 int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
418  unsigned long count);
419 
420 int a4l_buf_commit_absget(struct a4l_subdevice *subd,
421  unsigned long count);
422 
423 int a4l_buf_prepare_get(struct a4l_subdevice *subd,
424  unsigned long count);
425 
426 int a4l_buf_commit_get(struct a4l_subdevice *subd,
427  unsigned long count);
428 
429 int a4l_buf_get(struct a4l_subdevice *subd,
430  void *bufdata, unsigned long count);
431 
432 int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
433 
434 unsigned long a4l_buf_count(struct a4l_subdevice *subd);
435 
436 /* --- Current Command management function --- */
437 
438 static inline a4l_cmd_t *a4l_get_cmd(a4l_subd_t *subd)
439 {
440  return (subd->buf) ? subd->buf->cur_cmd : NULL;
441 }
442 
443 /* --- Munge related function --- */
444 
445 int a4l_get_chan(struct a4l_subdevice *subd);
446 
447 /* --- IOCTL / FOPS functions --- */
448 
449 int a4l_ioctl_mmap(a4l_cxt_t * cxt, void *arg);
450 int a4l_ioctl_bufcfg(a4l_cxt_t * cxt, void *arg);
451 int a4l_ioctl_bufcfg2(a4l_cxt_t * cxt, void *arg);
452 int a4l_ioctl_bufinfo(a4l_cxt_t * cxt, void *arg);
453 int a4l_ioctl_bufinfo2(a4l_cxt_t * cxt, void *arg);
454 int a4l_ioctl_poll(a4l_cxt_t * cxt, void *arg);
455 ssize_t a4l_read_buffer(a4l_cxt_t * cxt, void *bufdata, size_t nbytes);
456 ssize_t a4l_write_buffer(a4l_cxt_t * cxt, const void *bufdata, size_t nbytes);
457 int a4l_select(a4l_cxt_t *cxt,
458  rtdm_selector_t *selector,
459  enum rtdm_selecttype type, unsigned fd_index);
460 
461 #endif /* __KERNEL__ */
462 
/* MMAP ioctl argument structure */
struct a4l_mmap_arg {
	/* Index of the subdevice whose buffer is to be mapped */
	unsigned int idx_subd;
	/* Size of the mapping, in bytes */
	unsigned long size;
	/* User-space address of the mapping -- NOTE(review):
	   presumably filled in by the mmap handler; confirm against
	   a4l_ioctl_mmap */
	void *ptr;
};
typedef struct a4l_mmap_arg a4l_mmap_t;
470 
471 /* Constants related with buffer size
472  (might be used with BUFCFG ioctl) */
473 #define A4L_BUF_MAXSIZE 0x1000000
474 #define A4L_BUF_DEFSIZE 0x10000
475 #define A4L_BUF_DEFMAGIC 0xffaaff55
476 
/* BUFCFG ioctl argument structure */
struct a4l_buffer_config {
	/* NOTE: with the latest buffer implementation, the field
	   idx_subd became useless; the buffers are now per-context.
	   So, the buffer size configuration is specific to an opened
	   device. There is one small exception: a default buffer size
	   can be defined per device. So far, a hack is used to
	   implement the configuration of the default buffer size */
	unsigned int idx_subd;
	/* Requested buffer size, in bytes */
	unsigned long buf_size;
};
typedef struct a4l_buffer_config a4l_bufcfg_t;
490 
/* BUFINFO ioctl argument structure */
struct a4l_buffer_info {
	/* Index of the queried subdevice */
	unsigned int idx_subd;
	/* Size of the buffer, in bytes */
	unsigned long buf_size;
	/* Byte count transferred so far -- NOTE(review): presumably
	   read or write count depending on the subdevice direction;
	   confirm against the BUFINFO handler */
	unsigned long rw_count;
};
typedef struct a4l_buffer_info a4l_bufinfo_t;
498 
/* BUFCFG2 / BUFINFO2 ioctl argument structure */
struct a4l_buffer_config2 {
	/* Wake-up threshold (see the wake_count field of
	   struct a4l_buffer) */
	unsigned long wake_count;
	/* Padding for future extensions -- NOTE(review): presumably
	   must be zeroed by callers; confirm */
	unsigned long reserved[3];
};
typedef struct a4l_buffer_config2 a4l_bufcfg2_t;
505 
/* POLL ioctl argument structure */
struct a4l_poll {
	/* Index of the polled subdevice */
	unsigned int idx_subd;
	/* Poll argument -- NOTE(review): semantics (e.g. timeout)
	   are defined by the POLL ioctl handler; confirm against
	   a4l_ioctl_poll */
	unsigned long arg;
};
typedef struct a4l_poll a4l_poll_t;
512 
513 #endif /* !DOXYGEN_CPP */
514 
515 #endif /* __ANALOGY_BUFFER_H__ */
int a4l_buf_commit_absput(a4l_subd_t *subd, unsigned long count)
Set the absolute count of data which was sent from the device to the buffer since the start of the acquisition.
int a4l_buf_commit_absget(a4l_subd_t *subd, unsigned long count)
Set the absolute count of data which was sent from the buffer to the device since the start of the acquisition.
struct a4l_buffer * buf
Linked buffer.
Definition: subdevice.h:191
int a4l_buf_get(a4l_subd_t *subd, void *bufdata, unsigned long count)
Copy some data from the buffer to the device driver.
int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info, void *dst, const void __user *src, size_t size)
Check if read access to a user-space memory block is safe and copy it to the specified kernel buffer.
Analogy for Linux, context structure / macros declarations.
Structure describing the asynchronous instruction.
Definition: command.h:198
int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info, void __user *dst, const void *src, size_t size)
Check if read/write access to user-space memory block is safe and copy specified buffer to it...
int a4l_buf_prepare_get(a4l_subd_t *subd, unsigned long count)
Set the count of data which is to be sent from the buffer to the device at the next DMA shot...
a4l_cmd_t * a4l_get_cmd(a4l_subd_t *subd)
Get the current Analogy command descriptor.
int a4l_buf_evt(a4l_subd_t *subd, unsigned long evts)
Signal some event(s) to a user-space program involved in some read / write operation.
int a4l_get_chan(a4l_subd_t *subd)
Get the channel index according to its type.
unsigned long a4l_buf_count(a4l_subd_t *subd)
Get the data amount available in the Analogy buffer.
Structure describing the subdevice.
Definition: subdevice.h:180
int a4l_poll(a4l_desc_t *dsc, unsigned int idx_subd, unsigned long ms_timeout)
Get the available data count.
Definition: async.c:289
Analogy for Linux, Operation system facilities.
int a4l_buf_prepare_absput(a4l_subd_t *subd, unsigned long count)
Update the absolute count of data sent from the device to the buffer since the start of the acquisition.
int a4l_buf_prepare_absget(a4l_subd_t *subd, unsigned long count)
Update the absolute count of data sent from the buffer to the device since the start of the acquisition.
rtdm_selecttype
Definition: rtdm_driver.h:139
int a4l_buf_prepare_put(a4l_subd_t *subd, unsigned long count)
Set the count of data which is to be sent to the buffer at the next DMA shot.
Real-Time Driver Model for Xenomai, driver API header.
int a4l_buf_commit_get(a4l_subd_t *subd, unsigned long count)
Set the count of data sent from the buffer to the device during the last completed DMA shots...
int a4l_buf_put(a4l_subd_t *subd, void *bufdata, unsigned long count)
Copy some data from the device driver to the buffer.
unsigned long flags
Type flags.
Definition: subdevice.h:199
int a4l_buf_commit_put(a4l_subd_t *subd, unsigned long count)
Set the count of data sent to the buffer during the last completed DMA shots.