/*
 * os_adapt.h - OS adaptation layer: Linux-kernel-style macros, fixed-width
 * types, bit operations and list helpers mapped onto FreeRTOS primitives.
 */
  1. #ifndef _OS_ADAPT_H
  2. #define _OS_ADAPT_H
  3. #include <string.h>
  4. #include "FreeRTOS.h"
  5. #include "list.h"
  6. #include "typedef.h"
  7. #ifdef __cplusplus
  8. extern "C" {
  9. #endif
  10. #define __INLINE inline
  11. #define PAGE_SIZE 4096
  12. #define ARCH_DMA_MINALIGN 32
  13. #define USEC_PER_MSEC 1000
  14. #define MSEC_PER_SEC 1000
  15. #define BUG() PrintVariableValueHex("bug on", __LINE__);
  16. #define BUG_ON(condition) if (condition) BUG()
  17. #define WARN_ON(condition) if (condition) BUG()
  18. #define barrier()
  19. #define wmb()
  20. #define EXPORT_SYMBOL(x)
  21. #define dev_dbg(dev, ...) TRACE_DEBUG(__VA_ARGS__)
  22. #define dev_vdbg dev_dbg
  23. #define dev_info(dev, ...) TRACE_INFO(__VA_ARGS__)
  24. #define dev_warn(dev, ...) TRACE_WARNING(__VA_ARGS__)
  25. #define dev_err(dev, ...) TRACE_ERROR(__VA_ARGS__)
  26. #define __iomem volatile
  27. #define unlikely(x) (x)
  28. #define likely(x) (x)
  29. #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
  30. #define DIV_ROUND_UP_ULL(ll, d) (((unsigned long long)(ll) + (d) - 1) / (d))
  31. #define BITS_PER_BYTE 8
  32. #define BITS_PER_LONG 32
  33. #define BIT(nr) (1UL << (nr))
  34. #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  35. #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
  36. #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
  37. #define min(x,y) ((x)<(y)?(x):(y))
/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h, both inclusive: GENMASK(7, 4) == 0xF0UL.
 */
#define GENMASK(h, l) \
(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
/* Win32/legacy-style aliases used by ported driver code. */
#define VOID void
#define BOOL int
#define TRUE 1
#define FALSE 0
/* FAR/NEAR memory-model qualifiers are meaningless on this target. */
#define FAR
#define NEAR
/* Linux-style fixed-width aliases. (u32 and UINT32 are presumably supplied
 * by typedef.h — TODO confirm, they are not defined in this header.) */
typedef unsigned long long u64;
typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8;
//typedef s32 ssize_t;
typedef u32 __le32;                  /* little-endian 32-bit value */
typedef UINT32 AARCHPTR;             /* integer wide enough for a CPU address */
typedef u32 dma_addr_t;              /* DMA bus address */
typedef u32 phys_addr_t;             /* physical address */
typedef phys_addr_t resource_size_t;
typedef unsigned gfp_t;              /* allocation-flags type */
/* Identity endian conversions — assumes a little-endian target
 * (TODO confirm against the CPU port). */
#define le16_to_cpu(x) (x)
#define le32_to_cpu(x) (x)
#define cpu_to_le32(x) (x)
/* Unconditional 32-bit byte swap. */
#define ___constant_swab32(x) ((u32)( \
(((u32)(x) & (u32)0x000000ffUL) << 24) | \
(((u32)(x) & (u32)0x0000ff00UL) << 8) | \
(((u32)(x) & (u32)0x00ff0000UL) >> 8) | \
(((u32)(x) & (u32)0xff000000UL) >> 24)))
/* Element count of a true array — not valid on pointers/parameters. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/* Declare an unsigned long array large enough to hold @bits bits. */
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
/* atomic_t modelled as a plain int wrapper; no atomic operations are
 * provided in this header. */
typedef struct {
    int counter;
} atomic_t;
typedef struct refcount_struct {
    atomic_t refs;
} refcount_t;
/* NOTE(review): ATOMIC_INIT is not defined in this header — presumably
 * provided by typedef.h; confirm before using REFCOUNT_INIT/KREF_INIT. */
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
struct kref {
    refcount_t refcount;
};
#define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
/* True when a bitmap of @nbits bits fits in one unsigned long.
 * NOTE: evaluates @nbits twice. */
#define small_const_nbits(nbits) \
(nbits && (nbits) <= BITS_PER_LONG)
/* Raw register/memory accessors: volatile-qualified loads and stores at
 * an integer or pointer @addr. */
#define reg8_read(addr) *((volatile uint8_t *)(addr))
#define reg16_read(addr) *((volatile uint16_t *)(addr))
#define reg32_read(addr) *((volatile uint32_t *)(addr))
#define reg8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
#define reg16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
#define reg32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
#define mem8_read(addr) *((volatile uint8_t *)(addr))
#define mem16_read(addr) *((volatile uint16_t *)(addr))
#define mem32_read(addr) *((volatile uint32_t *)(addr))
#define mem8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
#define mem16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
#define mem32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
/* Linux-style MMIO helpers. NOTE the (value, address) argument order of
 * writeb/writew/writel — the reverse of reg*_write. */
#define readb(a) reg8_read(a)
#define readw(a) reg16_read(a)
#define readl(a) reg32_read(a)
#define writeb(v, a) reg8_write(a, v)
#define writew(v, a) reg16_write(a, v)
#define writel(v, a) reg32_write(a, v)
  102. static __INLINE void memset_s(void *dest, size_t destMax, int c, size_t count)
  103. {
  104. memset(dest, c, destMax);
  105. }
  106. static __INLINE void bitmap_zero(unsigned long *dst, unsigned int nbits)
  107. {
  108. if (small_const_nbits(nbits))
  109. *dst = 0UL;
  110. else {
  111. unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
  112. memset(dst, 0, len);
  113. }
  114. }
  115. /**
  116. * fls - find last (most-significant) bit set
  117. * @x: the word to search
  118. *
  119. * This is defined the same way as ffs.
  120. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  121. */
  122. static __INLINE int fls(u32 x)
  123. {
  124. int r = 32;
  125. if (!x)
  126. return 0;
  127. if (!(x & 0xffff0000u)) {
  128. x <<= 16;
  129. r -= 16;
  130. }
  131. if (!(x & 0xff000000u)) {
  132. x <<= 8;
  133. r -= 8;
  134. }
  135. if (!(x & 0xf0000000u)) {
  136. x <<= 4;
  137. r -= 4;
  138. }
  139. if (!(x & 0xc0000000u)) {
  140. x <<= 2;
  141. r -= 2;
  142. }
  143. if (!(x & 0x80000000u)) {
  144. x <<= 1;
  145. r -= 1;
  146. }
  147. return r;
  148. }
  149. /*
  150. * __fls() returns the bit position of the last bit set, where the
  151. * LSB is 0 and MSB is 31. Zero input is undefined.
  152. */
  153. static __INLINE unsigned long __fls(unsigned long x)
  154. {
  155. return fls(x) - 1;
  156. }
  157. /*
  158. * ffs() returns zero if the input was zero, otherwise returns the bit
  159. * position of the first set bit, where the LSB is 1 and MSB is 32.
  160. */
  161. static __INLINE int ffs(int x)
  162. {
  163. return fls(x & -x);
  164. }
  165. /*
  166. * __ffs() returns the bit position of the first bit set, where the
  167. * LSB is 0 and MSB is 31. Zero input is undefined.
  168. */
  169. static __INLINE unsigned long __ffs(unsigned long x)
  170. {
  171. return ffs(x) - 1;
  172. }
  173. #define MAX_ERRNO 4095
  174. #define IS_ERR_VALUE(x) (x) >= (unsigned long)-MAX_ERRNO
  175. static __INLINE void *ERR_PTR(long error)
  176. {
  177. return (void *) error;
  178. }
  179. static __INLINE long PTR_ERR(const void *ptr)
  180. {
  181. return (long) ptr;
  182. }
  183. static __INLINE long IS_ERR(const void *ptr)
  184. {
  185. return IS_ERR_VALUE((unsigned long)ptr);
  186. }
  187. static __INLINE int IS_ERR_OR_NULL(const void *ptr)
  188. {
  189. return !ptr || IS_ERR_VALUE((unsigned long)ptr);
  190. }
  191. static __INLINE void set_bit(int nr, volatile unsigned long *addr)
  192. {
  193. unsigned long mask = BIT_MASK(nr);
  194. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  195. *p |= mask;
  196. }
  197. static __INLINE void clear_bit(int nr, volatile unsigned long *addr)
  198. {
  199. unsigned long mask = BIT_MASK(nr);
  200. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  201. *p &= ~mask;
  202. }
  203. /**
  204. * test_bit - Determine whether a bit is set
  205. * @nr: bit number to test
  206. * @addr: Address to start counting from
  207. */
  208. static __INLINE int test_bit(int nr, const volatile unsigned long *addr)
  209. {
  210. return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
  211. }
  212. /**
  213. * test_and_set_bit - Set a bit and return its old value
  214. * @nr: Bit to set
  215. * @addr: Address to count from
  216. *
  217. * This operation is atomic and cannot be reordered.
  218. * It may be reordered on other architectures than x86.
  219. * It also implies a memory barrier.
  220. */
  221. static __INLINE int test_and_set_bit(int nr, volatile unsigned long *addr)
  222. {
  223. unsigned long mask = BIT_MASK(nr);
  224. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  225. unsigned long old;
  226. old = *p;
  227. *p = old | mask;
  228. return (old & mask) != 0;
  229. }
  230. static __INLINE int test_and_clear_bit(int nr, volatile unsigned long *addr)
  231. {
  232. unsigned long mask = BIT_MASK(nr);
  233. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  234. unsigned long old;
  235. old = *p;
  236. *p = old & ~mask;
  237. return (old & mask) != 0;
  238. }
  239. /**
  240. * list_splice_init - join two lists and reinitialise the emptied list.
  241. * @list: the new list to add.
  242. * @head: the place to add it in the first list.
  243. *
  244. * The list at @list is reinitialised
  245. */
  246. static __INLINE void list_splice_init(List_t *list, List_t *head)
  247. {
  248. ListItem_t *pxIndex = head->pxIndex;
  249. ListItem_t *first = list->pxIndex->pxNext;
  250. ListItem_t *last = list->pxIndex->pxPrevious;
  251. if (!listLIST_IS_EMPTY(list)) {
  252. first->pxPrevious = pxIndex->pxPrevious;
  253. last->pxNext = pxIndex;
  254. pxIndex->pxPrevious->pxNext = first;
  255. pxIndex->pxPrevious = last;
  256. head->uxNumberOfItems += list->uxNumberOfItems;
  257. vListInitialise(list);
  258. }
  259. }
/**
 * list_move - remove @item from its current list and insert it after @pos
 * @item: the entry to move
 * @list: the list that will own @item after the move
 * @pos:  the item that will precede @item in its new position
 *
 * Despite the Linux-style name, this inserts after an arbitrary @pos
 * (which the caller must ensure belongs to @list), not at the list head.
 */
static __INLINE void list_move(ListItem_t *item, List_t *list, ListItem_t *pos)
{
    /* Preserve pvOwner across the removal — uxListRemove() is not
     * expected to touch it, but this guards against it (TODO confirm
     * against the FreeRTOS list.c in use). */
    void *pvOwner = item->pvOwner;
    uxListRemove(item);
    item->pvOwner = pvOwner;
    /* Link item in between pos and pos->pxNext. */
    item->pxNext = pos->pxNext;
    item->pxNext->pxPrevious = item;
    item->pxPrevious = pos;
    pos->pxNext = item;
    /* Remember which list the item is in. This allows fast removal of the
    item later. */
    item->pxContainer = list;
    list->uxNumberOfItems++;
}
/*
 * Iterate over a FreeRTOS list, exposing both the ListItem_t cursor
 * (@pxListItem) and its pvOwner (@pvOwner). NOTE(review): the initialiser
 * and the post-step both read the owner of the end marker (for an empty
 * list, and on the final step before the condition stops the loop); the
 * value is never used, but the access assumes the end marker is laid out
 * like a full ListItem_t — verify against the configured list layout.
 */
#define list_for_each_entry(pxListItem, pvOwner, list) \
for (pxListItem = listGET_HEAD_ENTRY(list), \
pvOwner = listGET_LIST_ITEM_OWNER(pxListItem); \
pxListItem != listGET_END_MARKER(list); \
pxListItem = listGET_NEXT(pxListItem), \
pvOwner = listGET_LIST_ITEM_OWNER(pxListItem))
/* Owner of a given list item / of the first item in a list. */
#define list_entry(pxListItem) listGET_LIST_ITEM_OWNER(pxListItem)
#define list_first_entry(pxList) listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(pxList))
/* Scatter-gather iterator flags, mirroring Linux lib/scatterlist.h. */
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
/* Heap allocator hooks supplied by the platform runtime. */
void *malloc(size_t size);
extern void free(void *ptr);
/* Minimal stand-in for Linux's struct device so ported driver code
 * compiles; only the name field is kept (the dev_* macros here ignore it). */
struct device {
    const char *init_name;
};
  295. #ifdef __cplusplus
  296. }
  297. #endif
  298. #endif