Xenomai  3.0-rc3
buffer.h

#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
#define _COBALT_RTDM_ANALOGY_BUFFER_H

#include <linux/version.h>
#include <linux/mm.h>
#include <rtdm/driver.h>
#include <rtdm/uapi/analogy.h>

#include <rtdm/analogy/context.h>
#include <rtdm/analogy/command.h>
#include <rtdm/analogy/subdevice.h>

/* --- Event bits / flags --- */

#define A4L_BUF_EOBUF_NR 0
#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)

#define A4L_BUF_ERROR_NR 1
#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)

#define A4L_BUF_EOA_NR 2
#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)

/* --- Status bits / flags --- */

#define A4L_BUF_BULK_NR 8
#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)

#define A4L_BUF_MAP_NR 9
#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
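
/* Each flag exists in two forms: the _NR constant is the bit number
   expected by the kernel's atomic bit operations (set_bit, test_bit),
   while the shifted mask suits plain tests against a flags word. A
   minimal sketch of both uses (the helper name is illustrative, not
   part of the API): */
static inline int a4l_example_flags_demo(unsigned long *flags)
{
	set_bit(A4L_BUF_EOA_NR, flags);	/* atomic bitop takes the bit number */
	return (*flags & A4L_BUF_ERROR) ? -EPIPE : 0;	/* mask for a plain test */
}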

/* Buffer descriptor structure */
struct a4l_buffer {

	/* Added by the structure update */
	struct a4l_subdevice *subd;

	/* Buffer's first virtual page pointer */
	void *buf;

	/* Buffer's global size */
	unsigned long size;
	/* Array containing the buffer's page pointers */
	unsigned long *pg_list;

	/* RT/NRT synchronization element */
	struct a4l_sync sync;

	/* Counters needed for transfer */
	unsigned long end_count;
	unsigned long prd_count;
	unsigned long cns_count;
	unsigned long tmp_count;

	/* Status + events occurring during transfer */
	unsigned long flags;

	/* Command in progress */
	struct a4l_cmd_desc *cur_cmd;

	/* Munge counter */
	unsigned long mng_count;

	/* Threshold below which the user process should not be
	   awakened */
	unsigned long wake_count;
};
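
/* The prd_count / cns_count / mng_count members are absolute byte
   counters that only ever grow; ring positions are derived with a
   modulo on the buffer size. A minimal sketch of that arithmetic
   (helper names are illustrative): */
static inline unsigned long a4l_example_ring_offset(struct a4l_buffer *buf,
						    unsigned long abs_count)
{
	return abs_count % buf->size;	/* where this count lands in the ring */
}

static inline unsigned long a4l_example_pending(struct a4l_buffer *buf)
{
	/* Bytes produced but not yet consumed; unsigned subtraction
	   keeps the result correct across counter wrap-around */
	return buf->prd_count - buf->cns_count;
}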

static inline void __dump_buffer_counters(struct a4l_buffer *buf)
{
	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p\n", buf, buf->buf);
	__a4l_dbg(1, core_dbg, "end=%06lu, prd=%06lu, cns=%06lu, tmp=%06lu\n",
		  buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
}

/* --- Static inline functions related to
   user<->kernel data transfers --- */

/* __produce() copies data into the asynchronous buffer, splitting the
   copy whenever it would run past the end of the ring buffer (the
   destination is not contiguous across the wrap point). It is used in
   both read and write operations. */
static inline int __produce(struct a4l_device_context *cxt,
			    struct a4l_buffer *buf, void *pin, unsigned long count)
{
	unsigned long start_ptr = (buf->prd_count % buf->size);
	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
	unsigned long tmp_cnt = count;
	int ret = 0;

	while (ret == 0 && tmp_cnt != 0) {
		/* Check whether the copy can be performed contiguously */
		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
			buf->size - start_ptr : tmp_cnt;

		/* Perform the copy; a NULL context means the source is
		   kernel memory, otherwise copy from user space */
		if (cxt == NULL)
			memcpy(buf->buf + start_ptr, pin, blk_size);
		else
			ret = rtdm_safe_copy_from_user(fd,
						       buf->buf + start_ptr,
						       pin, blk_size);

		/* Update pointers/counts */
		pin += blk_size;
		tmp_cnt -= blk_size;
		start_ptr = 0;
	}

	return ret;
}

/* __consume() copies data out of the asynchronous buffer, splitting
   the copy at the ring-buffer wrap point just like __produce(). It is
   used in both read and write operations. */
static inline int __consume(struct a4l_device_context *cxt,
			    struct a4l_buffer *buf, void *pout, unsigned long count)
{
	unsigned long start_ptr = (buf->cns_count % buf->size);
	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
	unsigned long tmp_cnt = count;
	int ret = 0;

	while (ret == 0 && tmp_cnt != 0) {
		/* Check whether the copy can be performed contiguously */
		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
			buf->size - start_ptr : tmp_cnt;

		/* Perform the copy; a NULL context means the destination
		   is kernel memory, otherwise copy to user space */
		if (cxt == NULL)
			memcpy(pout, buf->buf + start_ptr, blk_size);
		else
			ret = rtdm_safe_copy_to_user(fd,
						     pout,
						     buf->buf + start_ptr,
						     blk_size);

		/* Update pointers/counts */
		pout += blk_size;
		tmp_cnt -= blk_size;
		start_ptr = 0;
	}

	return ret;
}
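
/* To see the splitting at work, consider a worked trace of __produce()
   with illustrative numbers: a 4096-byte ring whose producer offset
   sits at 3072 receives a 2048-byte payload.

     start_ptr = 3072, tmp_cnt = 2048, size = 4096
     pass 1: blk_size = 4096 - 3072 = 1024  -> copied to the ring's tail
     pass 2: start_ptr = 0, blk_size = 1024 -> remainder copied to the head */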

/* __munge() calls the subdevice-specific munge callback on contiguous
   windows within the buffer, splitting at the wrap point like the copy
   helpers above. It is used in both read and write operations. */
static inline void __munge(struct a4l_subdevice *subd,
			   void (*munge) (struct a4l_subdevice *,
					  void *, unsigned long),
			   struct a4l_buffer *buf, unsigned long count)
{
	unsigned long start_ptr = (buf->mng_count % buf->size);
	unsigned long tmp_cnt = count;

	while (tmp_cnt != 0) {
		/* Check whether the munge can be performed contiguously */
		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
			buf->size - start_ptr : tmp_cnt;

		/* Perform the munge operation */
		munge(subd, buf->buf + start_ptr, blk_size);

		/* Update the start pointer and the count */
		tmp_cnt -= blk_size;
		start_ptr = 0;
	}
}
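
/* A munge callback typically converts the raw hardware sample format
   in place. A minimal sketch, assuming a hypothetical board whose ADC
   delivers offset-binary 16-bit samples that must become two's
   complement (the function name and conversion are illustrative): */
static void a4l_example_munge(struct a4l_subdevice *subd,
			      void *buf, unsigned long size)
{
	u16 *sample = buf;
	unsigned long i;

	/* Flipping the MSB converts offset binary to two's complement */
	for (i = 0; i < size / sizeof(*sample); i++)
		sample[i] ^= 0x8000;
}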

/* __handle_event() may only be called from process context (not from
   an interrupt service routine). It lets the client process retrieve
   the buffer status updated by the driver. */
static inline int __handle_event(struct a4l_buffer *buf)
{
	int ret = 0;

	/* The "End of acquisition" event must not be cleared
	   before the buffer has been completely flushed */
	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
		ret = -ENOENT;

	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
		ret = -EPIPE;

	return ret;
}

/* --- Counter management functions --- */

/* Here, we may wonder why we need more than two counters / pointers.

   Theoretically, we only need two counters (or two pointers):
   - one which tells where the reader should be within the buffer
   - one which tells where the writer should be within the buffer

   With these two counters (or pointers), we just have to check that
   the writer does not overtake the reader inside the ring buffer
   BEFORE any read / write operation.

   However, if one element is a DMA controller, we have to be more
   careful. Generally, a DMA transfer occurs like this:
   DMA shot
     |-> then DMA interrupt
     |-> then DMA soft handler which checks the counter

   So, the checks occur AFTER the write operations.

   Let's take an example: the reader is a software task and the writer
   is a DMA controller. At the end of the DMA shot, the write counter
   is higher than the read counter. Unfortunately, if a read operation
   occurs between the DMA shot and the DMA interrupt, the handler
   will not notice that an overflow occurred.

   That is why tmp_count comes into play: tmp_count records the
   reader/consumer counter before the next DMA shot, and once that
   shot is done, we check that the updated writer/producer counter is
   not higher than tmp_count. Thus we are sure that the DMA writer has
   not overtaken the reader, because it was not able to overtake the
   n-1 value. */
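
/* The overtake test relies on modular (unsigned) counter arithmetic:
   the difference of two absolute counts stays meaningful even across
   wrap-around. A minimal sketch of the check run after a DMA shot;
   the helper name and parameters are illustrative: */
static inline int a4l_example_dma_overran(unsigned long dma_count,
					  unsigned long tmp_count,
					  unsigned long size)
{
	/* The producer moved more than one full buffer beyond the
	   snapshotted consumer count: unread data were overwritten */
	return (dma_count - tmp_count) > size;
}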

static inline int __pre_abs_put(struct a4l_buffer *buf, unsigned long count)
{
	if (count - buf->tmp_count > buf->size) {
		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
		return -EPIPE;
	}

	buf->tmp_count = buf->cns_count;

	return 0;
}

static inline int __pre_put(struct a4l_buffer *buf, unsigned long count)
{
	return __pre_abs_put(buf, buf->tmp_count + count);
}

static inline int __pre_abs_get(struct a4l_buffer *buf, unsigned long count)
{
	/* The first time, we expect the buffer to be properly filled
	   before the trigger occurrence; consequently, tmp_count must
	   have been initialized, and tmp_count is updated right here */
	if (buf->tmp_count == 0 || buf->cns_count == 0)
		goto out;

	/* At the end of the acquisition, the user application has
	   written the defined amount of data into the buffer; so, the
	   last time, the DMA channel can easily overtake the tmp
	   frontier because no more data are sent from user space;
	   therefore no spurious alarm should be raised */
	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
		goto out;

	/* Once these exceptions are handled, we check that the DMA
	   transfer has not overtaken the last record of the production
	   count (tmp_count was updated with prd_count the last time
	   __pre_abs_get was called). We cannot simply compare the
	   current DMA count with the current production count: even if
	   the production count is higher than the DMA count right now,
	   the DMA count may have been greater a few cycles earlier, in
	   which case the DMA channel would have retrieved the wrong
	   data */
	if ((long)(count - buf->tmp_count) > 0) {
		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
		return -EPIPE;
	}

out:
	buf->tmp_count = buf->prd_count;

	return 0;
}

static inline int __pre_get(struct a4l_buffer *buf, unsigned long count)
{
	return __pre_abs_get(buf, buf->tmp_count + count);
}

static inline int __abs_put(struct a4l_buffer *buf, unsigned long count)
{
	unsigned long old = buf->prd_count;

	if ((long)(buf->prd_count - count) >= 0)
		return -EINVAL;

	buf->prd_count = count;

	if ((old / buf->size) != (count / buf->size))
		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);

	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
		set_bit(A4L_BUF_EOA_NR, &buf->flags);

	return 0;
}

static inline int __put(struct a4l_buffer *buf, unsigned long count)
{
	return __abs_put(buf, buf->prd_count + count);
}

static inline int __abs_get(struct a4l_buffer *buf, unsigned long count)
{
	unsigned long old = buf->cns_count;

	if ((long)(buf->cns_count - count) >= 0)
		return -EINVAL;

	buf->cns_count = count;

	if ((old / buf->size) != (count / buf->size))
		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);

	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
		set_bit(A4L_BUF_EOA_NR, &buf->flags);

	return 0;
}

static inline int __get(struct a4l_buffer *buf, unsigned long count)
{
	return __abs_get(buf, buf->cns_count + count);
}
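
/* The helpers above are meant to be used in prepare/copy/commit
   sequences: check headroom first, move the data, then publish the new
   counter. A minimal sketch of a plausible producer-side path (the
   helper name is illustrative; the actual service is the a4l_buf_put()
   family declared below): */
static inline int a4l_example_push(struct a4l_device_context *cxt,
				   struct a4l_buffer *buf,
				   void *data, unsigned long count)
{
	int ret = __pre_put(buf, count);	/* would we overrun the reader? */

	if (ret < 0)
		return ret;

	ret = __produce(cxt, buf, data, count);	/* copy into the ring */
	if (ret < 0)
		return ret;

	return __put(buf, count);	/* publish the new producer count */
}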

static inline unsigned long __count_to_put(struct a4l_buffer *buf)
{
	unsigned long ret;

	if ((long)(buf->size + buf->cns_count - buf->prd_count) > 0)
		ret = buf->size + buf->cns_count - buf->prd_count;
	else
		ret = 0;

	return ret;
}

static inline unsigned long __count_to_get(struct a4l_buffer *buf)
{
	unsigned long ret;

	/* If the acquisition is unlimited (end_count == 0), end_count
	   must not be taken into account */
	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
		ret = buf->prd_count;
	else
		ret = buf->end_count;

	if ((long)(ret - buf->cns_count) > 0)
		ret -= buf->cns_count;
	else
		ret = 0;

	return ret;
}

static inline unsigned long __count_to_end(struct a4l_buffer *buf)
{
	unsigned long ret = buf->end_count - buf->cns_count;

	if (buf->end_count == 0)
		return ULONG_MAX;

	return ((long)ret) < 0 ? 0 : ret;
}

/* --- Buffer internal functions --- */

int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);

void a4l_free_buffer(struct a4l_buffer *buf_desc);

void a4l_init_buffer(struct a4l_buffer *buf_desc);

void a4l_cleanup_buffer(struct a4l_buffer *buf_desc);

int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);

void a4l_cancel_buffer(struct a4l_device_context *cxt);

int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
			   unsigned long count);

int a4l_buf_commit_absput(struct a4l_subdevice *subd,
			  unsigned long count);

int a4l_buf_prepare_put(struct a4l_subdevice *subd,
			unsigned long count);

int a4l_buf_commit_put(struct a4l_subdevice *subd,
		       unsigned long count);

int a4l_buf_put(struct a4l_subdevice *subd,
		void *bufdata, unsigned long count);

int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
			   unsigned long count);

int a4l_buf_commit_absget(struct a4l_subdevice *subd,
			  unsigned long count);

int a4l_buf_prepare_get(struct a4l_subdevice *subd,
			unsigned long count);

int a4l_buf_commit_get(struct a4l_subdevice *subd,
		       unsigned long count);

int a4l_buf_get(struct a4l_subdevice *subd,
		void *bufdata, unsigned long count);

int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);

unsigned long a4l_buf_count(struct a4l_subdevice *subd);
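
/* A typical driver-side use of this API is an acquisition interrupt
   handler pushing fresh samples and signalling events. A minimal
   sketch under assumed names (a4l_example_isr and dev_samples are
   illustrative, not part of Analogy): */
static int a4l_example_isr(rtdm_irq_t *irq_handle)
{
	struct a4l_subdevice *subd =
		rtdm_irq_get_arg(irq_handle, struct a4l_subdevice);
	char dev_samples[64];	/* stand-in for a hardware FIFO drain */

	/* Push the new samples into the asynchronous buffer... */
	if (a4l_buf_put(subd, dev_samples, sizeof(dev_samples)) < 0)
		/* ...flagging an overrun so the reader gets -EPIPE */
		a4l_buf_evt(subd, A4L_BUF_ERROR);

	return RTDM_IRQ_HANDLED;
}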

/* --- Current command management function --- */

static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
{
	return (subd->buf) ? subd->buf->cur_cmd : NULL;
}

/* --- Munge related function --- */

int a4l_get_chan(struct a4l_subdevice *subd);

/* --- IOCTL / FOPS functions --- */

int a4l_ioctl_mmap(struct a4l_device_context *cxt, void *arg);
int a4l_ioctl_bufcfg(struct a4l_device_context *cxt, void *arg);
int a4l_ioctl_bufcfg2(struct a4l_device_context *cxt, void *arg);
int a4l_ioctl_bufinfo(struct a4l_device_context *cxt, void *arg);
int a4l_ioctl_bufinfo2(struct a4l_device_context *cxt, void *arg);
int a4l_ioctl_poll(struct a4l_device_context *cxt, void *arg);
ssize_t a4l_read_buffer(struct a4l_device_context *cxt, void *bufdata, size_t nbytes);
ssize_t a4l_write_buffer(struct a4l_device_context *cxt, const void *bufdata, size_t nbytes);
int a4l_select(struct a4l_device_context *cxt,
	       rtdm_selector_t *selector,
	       enum rtdm_selecttype type, unsigned fd_index);

#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */