/* Xenomai 3.0-rc3 — copperplate/heapobj.h */
1 /*
2  * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13 
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17  */
18 
19 #ifndef _COPPERPLATE_HEAPOBJ_H
20 #define _COPPERPLATE_HEAPOBJ_H
21 
22 #include <sys/types.h>
23 #include <stdint.h>
24 #include <string.h>
25 #include <assert.h>
26 #include <errno.h>
27 #include <pthread.h>
28 #include <xeno_config.h>
29 #include <boilerplate/wrappers.h>
30 #include <boilerplate/list.h>
31 #include <copperplate/reference.h>
32 #include <boilerplate/lock.h>
33 #include <copperplate/debug.h>
34 
/*
 * Descriptor of a memory heap. The allocator backing @pool depends on
 * the build configuration: TLSF or plain malloc in process-private
 * mode, a shared pool in pshared mode.
 */
struct heapobj {
	void *pool;	/* Underlying memory pool the allocator works on. */
	size_t size;	/* Pool size in bytes (updated by extend). */
	char name[32];	/* Symbolic name of the heap. */
#ifdef CONFIG_XENO_PSHARED
	/*
	 * NOTE(review): presumably the name of the shared-memory
	 * backing file in pshared mode — confirm against the pshared
	 * heapobj implementation.
	 */
	char fsname[256];
#endif
};
43 
/*
 * System-wide registry of threads and heaps (pshared mode).
 * All members are protected by @lock.
 */
struct sysgroup {
	int thread_count;		/* Number of entries in @thread_list. */
	struct list thread_list;	/* Registered threads. */
	int heap_count;			/* Number of entries in @heap_list. */
	struct list heap_list;		/* Registered heaps. */
	pthread_mutex_t lock;		/* Serializes access to the lists/counters. */
};
51 
#ifdef __cplusplus
extern "C" {
#endif

/* One-time setup of the process-private heap package. */
int heapobj_pkg_init_private(void);

/* Initialize @hobj over @mem (caller-provided) or internal memory. */
int __heapobj_init_private(struct heapobj *hobj, const char *name,
			   size_t size, void *mem);

/* Initialize @hobj as a pool of @elems fixed-size objects. */
int heapobj_init_array_private(struct heapobj *hobj, const char *name,
			       size_t size, int elems);
#ifdef __cplusplus
}
#endif
66 
#ifdef CONFIG_XENO_TLSF

/* Entry points into the TLSF allocator backing private heaps. */
size_t get_used_size(void *pool);
void destroy_memory_pool(void *pool);
size_t add_new_area(void *pool, size_t size, void *mem);
void *malloc_ex(size_t size, void *pool);
void free_ex(void *pool, void *ptr);
void *tlsf_malloc(size_t size);
void tlsf_free(void *ptr);
size_t malloc_usable_size_ex(void *ptr, void *pool);
77 
78 static inline
79 void pvheapobj_destroy(struct heapobj *hobj)
80 {
81  destroy_memory_pool(hobj->pool);
82 }
83 
84 static inline
85 int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
86 {
87  hobj->size = add_new_area(hobj->pool, size, mem);
88  if (hobj->size == (size_t)-1)
89  return __bt(-EINVAL);
90 
91  return 0;
92 }
93 
94 static inline
95 void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
96 {
97  return malloc_ex(size, hobj->pool);
98 }
99 
100 static inline
101 void pvheapobj_free(struct heapobj *hobj, void *ptr)
102 {
103  free_ex(ptr, hobj->pool);
104 }
105 
106 static inline
107 size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
108 {
109  return malloc_usable_size_ex(ptr, hobj->pool);
110 }
111 
112 static inline
113 size_t pvheapobj_inquire(struct heapobj *hobj)
114 {
115  return get_used_size(hobj->pool);
116 }
117 
/* Allocate @size bytes from the process-private TLSF heap. */
static inline void *pvmalloc(size_t size)
{
	void *ptr = tlsf_malloc(size);

	return ptr;
}
122 
/* Return @ptr to the process-private TLSF heap. */
static inline void pvfree(void *ptr) { tlsf_free(ptr); }
127 
/*
 * Duplicate the NUL-terminated string @ptr into private heap memory.
 * Returns NULL if the allocation fails; the caller frees with pvfree().
 */
static inline char *pvstrdup(const char *ptr)
{
	size_t len = strlen(ptr) + 1;
	char *dup = (char *)pvmalloc(len);

	if (dup)
		memcpy(dup, ptr, len);

	return dup;
}
138 
139 #else /* !CONFIG_XENO_TLSF, i.e. malloc */
140 
141 #include <malloc.h>
142 
/* Allocate @size bytes from the regular process heap. */
static inline void *pvmalloc(size_t size)
{
	/*
	 * NOTE: We don't want debug _nrt assertions to trigger when
	 * running over Cobalt if the user picked this allocator, so
	 * we make sure to call the glibc directly, not the Cobalt
	 * wrappers.
	 */
	return __STD(malloc(size));
}
153 
/* Release @ptr to the regular process heap, bypassing Cobalt wrappers. */
static inline void pvfree(void *ptr)
{
	__STD(free(ptr));
}
158 
/*
 * Duplicate the NUL-terminated string @ptr on the process heap.
 * Returns NULL on allocation failure; release with pvfree().
 */
static inline char *pvstrdup(const char *ptr)
{
	char *dup = strdup(ptr);

	return dup;
}
163 
/* Out-of-line private heap operations (malloc-based build). */
void pvheapobj_destroy(struct heapobj *hobj);

int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem);

void *pvheapobj_alloc(struct heapobj *hobj, size_t size);

void pvheapobj_free(struct heapobj *hobj, void *ptr);

size_t pvheapobj_inquire(struct heapobj *hobj);

size_t pvheapobj_validate(struct heapobj *hobj, void *ptr);
175 
176 #endif /* !CONFIG_XENO_TLSF */
177 
#ifdef CONFIG_XENO_PSHARED

/* Base address of the main shared heap for this session. */
extern void *__main_heap;

/* Shared name catalog living in the main heap. */
extern struct hash_table *__main_catalog;
#define main_catalog (*((struct hash_table *)__main_catalog))

/* Shared registry of threads/heaps for this session. */
extern struct sysgroup *__main_sysgroup;

struct sysgroup_memspec {
	struct holder next;	/* Linkage in a sysgroup list. */
};

struct agent_memspec {
	/* NOTE(review): presumably the pid of the session's agent
	   process — confirm against the agent implementation. */
	pid_t pid;
};
196 
197 static inline void *mainheap_ptr(memoff_t off)
198 {
199  return off ? (void *)__memptr(__main_heap, off) : NULL;
200 }
201 
202 static inline memoff_t mainheap_off(void *addr)
203 {
204  return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
205 }
206 
/*
 * ptr shall point to a block of memory allocated within the main heap
 * if non-null; such address is always 8-byte aligned. Handles of
 * shared heap pointers are returned with bit #0 set, which serves as
 * a special tag detected in mainheap_deref(). A NULL pointer yields
 * the handle value 1 (offset 0 with the tag bit set), which
 * mainheap_deref() translates back to NULL via mainheap_ptr(0).
 */
#define mainheap_ref(ptr, type) \
	({ \
		type handle; \
		assert(__builtin_types_compatible_p(typeof(type), unsigned long) || \
		       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
		assert(ptr == NULL || __memchk(__main_heap, ptr)); \
		handle = (type)mainheap_off(ptr); \
		handle|1; \
	})
/*
 * Handles of shared heap-based pointers have bit #0 set. Other values
 * are not translated, and the return value is the original handle
 * cast to a pointer. A null handle is always returned unchanged.
 */
#define mainheap_deref(handle, type) \
	({ \
		type *ptr; \
		assert(__builtin_types_compatible_p(typeof(handle), unsigned long) || \
		       __builtin_types_compatible_p(typeof(handle), uintptr_t)); \
		ptr = (handle & 1) ? (type *)mainheap_ptr(handle & ~1UL) : (type *)handle; \
		ptr; \
	})
236 
237 static inline void
238 __sysgroup_add(struct sysgroup_memspec *obj, struct list *q, int *countp)
239 {
240  write_lock_nocancel(&__main_sysgroup->lock);
241  (*countp)++;
242  list_append(&obj->next, q);
243  write_unlock(&__main_sysgroup->lock);
244 }
245 
/* Register @__obj with the thread or heap group (__group: thread|heap). */
#define sysgroup_add(__group, __obj) \
	__sysgroup_add(__obj, &(__main_sysgroup->__group ## _list), \
		       &(__main_sysgroup->__group ## _count))
249 
250 static inline void
251 __sysgroup_remove(struct sysgroup_memspec *obj, int *countp)
252 {
253  write_lock_nocancel(&__main_sysgroup->lock);
254  (*countp)--;
255  list_remove(&obj->next);
256  write_unlock(&__main_sysgroup->lock);
257 }
258 
/* Unregister @__obj from the thread or heap group (__group: thread|heap). */
#define sysgroup_remove(__group, __obj) \
	__sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count))
261 
/* Grab the sysgroup lock for reading (e.g. around for_each_sysgroup()). */
static inline void sysgroup_lock(void)
{
	read_lock_nocancel(&__main_sysgroup->lock);
}
266 
/* Release the sysgroup lock taken by sysgroup_lock(). */
static inline void sysgroup_unlock(void)
{
	read_unlock(&__main_sysgroup->lock);
}
271 
/* Current number of members in the given group (thread or heap). */
#define sysgroup_count(__group) \
	(__main_sysgroup->__group ## _count)

/* Iterate over the members of the given group list; presumably the
   caller holds sysgroup_lock() — confirm with call sites. */
#define for_each_sysgroup(__obj, __group) \
	list_for_each_entry(__obj, &(__main_sysgroup->__group ## _list), next)
277 
/* One-time setup of the shared heap package. */
int heapobj_pkg_init_shared(void);

/* Initialize @hobj over a new shared pool of @size bytes. */
int heapobj_init(struct heapobj *hobj, const char *name,
		 size_t size);
282 
/*
 * Shared-mode variant of __heapobj_init(). The caller-provided memory
 * argument is ignored: a heap cannot be built over user-defined
 * memory in shared mode, so a pool is always allocated internally.
 */
static inline int __heapobj_init(struct heapobj *hobj, const char *name,
				 size_t size, void *unused)
{
	(void)unused;	/* Can't work on user-defined memory in shared mode. */

	return heapobj_init(hobj, name, size);
}
289 
/* Initialize @hobj as a shared pool of @elems fixed-size objects. */
int heapobj_init_array(struct heapobj *hobj, const char *name,
		       size_t size, int elems);

/* Destroy @hobj and release its pool. */
void heapobj_destroy(struct heapobj *hobj);

/* Extend @hobj with an additional chunk of memory. */
int heapobj_extend(struct heapobj *hobj,
		   size_t size, void *mem);

/* Allocate @size bytes from @hobj, NULL on failure. */
void *heapobj_alloc(struct heapobj *hobj,
		    size_t size);

/* Release @ptr back to @hobj. */
void heapobj_free(struct heapobj *hobj,
		  void *ptr);

/* Return the usable size of block @ptr from @hobj. */
size_t heapobj_validate(struct heapobj *hobj,
			void *ptr);

/* Return the amount of memory currently in use in @hobj. */
size_t heapobj_inquire(struct heapobj *hobj);

/* Attach this process to the shared heap of @session. */
int heapobj_bind_session(const char *session);

/* Detach this process from the bound session heap. */
void heapobj_unbind_session(void);

/* Shared-heap counterparts of malloc/free/strdup. */
void *xnmalloc(size_t size);

void xnfree(void *ptr);

char *xnstrdup(const char *ptr);
318 
319 #else /* !CONFIG_XENO_PSHARED */
320 
/* Empty placeholders: no registry data is needed without pshared. */
struct sysgroup_memspec {
};

struct agent_memspec {
};
326 
/*
 * Whether an object is laid in some shared heap. Never if pshared
 * mode is disabled.
 */
static inline int pshared_check(void *heap, void *addr)
{
	(void)heap;
	(void)addr;

	return 0;	/* No shared heaps without pshared support. */
}
335 
/*
 * Convert @__src to the type of @__dst, statically rejecting (C) or
 * run-time asserting (C++) a destination narrower than the source.
 */
#ifdef __cplusplus
#define __check_ref_width(__dst, __src) \
	({ \
		assert(sizeof(__dst) >= sizeof(__src)); \
		(typeof(__dst))__src; \
	})
#else
#define __check_ref_width(__dst, __src) \
	__builtin_choose_expr( \
		sizeof(__dst) >= sizeof(__src), (typeof(__dst))__src, \
		((void)0))
#endif
348 
/* Without pshared, a handle is just the pointer value, untagged. */
#define mainheap_ref(ptr, type) \
	({ \
		type handle; \
		handle = __check_ref_width(handle, ptr); \
		assert(ptr == NULL || __memchk(__main_heap, ptr)); \
		handle; \
	})
/* Inverse of mainheap_ref(): cast the handle back to a pointer. */
#define mainheap_deref(handle, type) \
	({ \
		type *ptr; \
		ptr = __check_ref_width(ptr, handle); \
		ptr; \
	})
362 
/* No sysgroup registry without pshared: registration is a no-op. */
#define sysgroup_add(__group, __obj)	do { } while (0)
#define sysgroup_remove(__group, __obj)	do { } while (0)
365 
/* Nothing to set up when pshared support is compiled out. */
static inline int heapobj_pkg_init_shared(void)
{
	return 0;	/* Always succeeds. */
}
370 
/* Initialize @hobj over @mem (or internal memory if @mem is NULL). */
static inline int __heapobj_init(struct heapobj *hobj, const char *name,
				 size_t size, void *mem)
{
	int ret = __heapobj_init_private(hobj, name, size, mem);

	return ret;
}
376 
/* Initialize @hobj with a new private pool of @size bytes. */
static inline int heapobj_init(struct heapobj *hobj, const char *name,
			       size_t size)
{
	/* NULL asks the private allocator to provide the memory. */
	return __heapobj_init_private(hobj, name, size, NULL);
}
382 
/* Initialize @hobj as a private pool of @elems objects of @size bytes. */
static inline int heapobj_init_array(struct heapobj *hobj, const char *name,
				     size_t size, int elems)
{
	int ret = heapobj_init_array_private(hobj, name, size, elems);

	return ret;
}
388 
/* Destroy the private heap @hobj. */
static inline void heapobj_destroy(struct heapobj *hobj) { pvheapobj_destroy(hobj); }
393 
/* Extend the private heap @hobj with chunk @mem of @size bytes. */
static inline int heapobj_extend(struct heapobj *hobj,
				 size_t size, void *mem)
{
	int ret = pvheapobj_extend(hobj, size, mem);

	return ret;
}
399 
/* Allocate @size bytes from the private heap @hobj, NULL on failure. */
static inline void *heapobj_alloc(struct heapobj *hobj,
				  size_t size)
{
	void *ptr = pvheapobj_alloc(hobj, size);

	return ptr;
}
405 
/* Release @ptr back to the private heap @hobj. */
static inline void heapobj_free(struct heapobj *hobj,
				void *ptr)
{
	pvheapobj_free(hobj, ptr);
}
411 
/* Return the usable size of block @ptr within @hobj. */
static inline size_t heapobj_validate(struct heapobj *hobj,
				      void *ptr)
{
	size_t usable = pvheapobj_validate(hobj, ptr);

	return usable;
}
417 
/* Return the amount of memory currently in use in @hobj, in bytes. */
static inline size_t heapobj_inquire(struct heapobj *hobj)
{
	size_t used = pvheapobj_inquire(hobj);

	return used;
}
422 
/* Session binding requires pshared mode; always fails here. */
static inline int heapobj_bind_session(const char *session)
{
	(void)session;

	return -ENOSYS;
}
427 
/* No session support without pshared; nothing to undo. */
static inline void heapobj_unbind_session(void)
{
}
429 
/* Private-mode stand-in for the shared-heap allocator. */
static inline void *xnmalloc(size_t size)
{
	void *ptr = pvmalloc(size);

	return ptr;
}
434 
/* Private-mode stand-in for the shared-heap release routine. */
static inline void xnfree(void *ptr) { pvfree(ptr); }
439 
/* Private-mode stand-in for the shared-heap strdup. */
static inline char *xnstrdup(const char *ptr)
{
	char *dup = pvstrdup(ptr);

	return dup;
}
444 
445 #endif /* !CONFIG_XENO_PSHARED */
446 
447 static inline const char *heapobj_name(struct heapobj *hobj)
448 {
449  return hobj->name;
450 }
451 
452 static inline size_t heapobj_size(struct heapobj *hobj)
453 {
454  return hobj->size;
455 }
456 
457 #endif /* _COPPERPLATE_HEAPOBJ_H */