/*
 * Xenomai API 2.5.6.1
 * ksrc/skins/posix/cb_lock.h
 */
00001 #ifndef CB_LOCK_H
00002 #define CB_LOCK_H
00003 
00004 #include <asm/xenomai/atomic.h>
00005 #include <nucleus/compiler.h>
00006 #include <nucleus/types.h>
00007 
#if !defined(__KERNEL__) && !defined(__XENO_SIM__)
/* Pure userland build: the nucleus thread type is opaque here, so
 * alias it to void — pointers to xnthread_t can still be declared. */
typedef void xnthread_t;
#endif /* !__KERNEL__ && !__XENO_SIM__ */
00011 
/* A spare bit of the owner handle flags the mutex as "claimed"
 * (i.e. contended). */
#define __CLAIMED_BIT           XN_HANDLE_SPARE3

/* Non-zero if the claimed bit is set in @owner. */
#define test_claimed(owner)     xnhandle_test_spare(owner, __CLAIMED_BIT)
/* @owner with all spare bits cleared (plain handle value). */
#define clear_claimed(owner)    xnhandle_mask_spare(owner)
/* Evaluates (GCC statement expression) to @owner with spare bits
 * cleared, then the claimed bit set iff @bit is non-zero. */
#define set_claimed(owner, bit) ({ \
        xnhandle_t __tmp = xnhandle_mask_spare(owner); \
        if (bit) \
                xnhandle_set_spare(__tmp, __CLAIMED_BIT); \
        __tmp; \
})
00022 
00023 #ifdef CONFIG_XENO_FASTSYNCH
00024 
00025 static  __inline__ int __cb_try_read_lock(xnarch_atomic_t *lock)
00026 {
00027         unsigned val = xnarch_atomic_get(lock);
00028         while (likely(val != -1)) {
00029                 unsigned old = xnarch_atomic_cmpxchg(lock, val, val + 1);
00030                 if (likely(old == val))
00031                         return 0;
00032                 val = old;
00033         }
00034         return -EBUSY;
00035 }
00036 
00037 static __inline__ void __cb_read_unlock(xnarch_atomic_t *lock)
00038 {
00039         unsigned old, val = xnarch_atomic_get(lock);
00040         while (likely(val != -1)) {
00041                 old = xnarch_atomic_cmpxchg(lock, val, val - 1);
00042                 if (likely(old == val))
00043                         return;
00044                 val = old;
00045         }
00046 }
00047 
00048 static __inline__ int __cb_try_write_lock(xnarch_atomic_t *lock)
00049 {
00050         unsigned old = xnarch_atomic_cmpxchg(lock, 0, -1);
00051         if (unlikely(old))
00052                 return -EBUSY;
00053         return 0;
00054 }
00055 
/* Unconditionally stamp the lock word to the write-locked state
 * ((unsigned)-1), discarding any reader count.  NOTE(review):
 * presumably only used where the caller already excludes readers —
 * confirm against call sites. */
static __inline__ void __cb_force_write_lock(xnarch_atomic_t *lock)
{
        xnarch_atomic_set(lock, -1);
}
00060 
/* Release a write lock by resetting the word to 0
 * (unlocked, zero readers). */
static __inline__ void __cb_write_unlock(xnarch_atomic_t *lock)
{
        xnarch_atomic_set(lock, 0);
}
/* Fast-synch build: the atomic lock word is used directly, so no
 * interrupt flags are needed — declare an empty, unused placeholder
 * to keep call sites uniform across configurations. */
#define DECLARE_CB_LOCK_FLAGS(name) struct { } name __attribute__((unused))
#define cb_try_read_lock(lock, flags) __cb_try_read_lock(lock)
#define cb_read_unlock(lock, flags) __cb_read_unlock(lock)
#define cb_try_write_lock(lock, flags) __cb_try_write_lock(lock)
#define cb_force_write_lock(lock, flags) __cb_force_write_lock(lock)
#define cb_write_unlock(lock, flags) __cb_write_unlock(lock)
#else /* !CONFIG_XENO_FASTSYNCH */
#if defined(__KERNEL__) || defined(__XENO_SIM__)
/* No fast synch support: fall back to the global nucleus lock
 * (nklock); "flags" holds the saved interrupt state. */
#define DECLARE_CB_LOCK_FLAGS(name) spl_t name
/* The try-lock forms cannot fail on this path, hence the trailing 0
 * yielded by the statement expression. */
#define cb_try_read_lock(lock, flags) \
        ({ xnlock_get_irqsave(&nklock, flags); 0; })
#define cb_read_unlock(lock, flags) xnlock_put_irqrestore(&nklock, flags)
#define cb_try_write_lock(lock, flags)  \
        ({ xnlock_get_irqsave(&nklock, flags); 0; })
#define cb_force_write_lock(lock, flags)  \
        ({ xnlock_get_irqsave(&nklock, flags); 0; })
#define cb_write_unlock(lock, flags) xnlock_put_irqrestore(&nklock, flags)
#else /* !__KERNEL__ */
/* Userland without fast synch: callback locking degrades to no-ops;
 * try-locks always "succeed" (0). */
#define DECLARE_CB_LOCK_FLAGS(name)
#define cb_try_read_lock(lock, flags) (0)
#define cb_read_unlock(lock, flags) do { } while (0)
#define cb_try_write_lock(lock, flags) (0)
#define cb_force_write_lock(lock, flags) do { } while (0)
#define cb_write_unlock(lock, flags) do { } while (0)
#endif /* !__KERNEL__ */
#endif /* !CONFIG_XENO_FASTSYNCH */
00091 
00092 #endif /* CB_LOCK_H */
/* All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Defines */