/*
 * os_adapt.h - OS adaptation layer that maps Linux-style kernel
 * primitives (bit operations, dev_* logging, ERR_PTR encoding,
 * list helpers) onto FreeRTOS for ported driver code.
 */
#ifndef _OS_ADAPT_H
#define _OS_ADAPT_H

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "FreeRTOS.h"
#include "list.h"
  6. #ifdef __cplusplus
  7. extern "C" {
  8. #endif
  9. #define __INLINE inline
  10. #define PAGE_SIZE 4096
  11. #define ARCH_DMA_MINALIGN 32
  12. #define USEC_PER_MSEC 1000
  13. #define MSEC_PER_SEC 1000
  14. #define BUG() printf("bug on %s %d.\n", __func__, __LINE__);
  15. #define BUG_ON(condition) if (condition) BUG()
  16. #define WARN_ON(condition) if (condition) BUG()
  17. #define barrier()
  18. #define wmb()
  19. #define EXPORT_SYMBOL(x)
  20. #define dev_dbg(dev, ...) TRACE_DEBUG(__VA_ARGS__)
  21. #define dev_vdbg dev_dbg
  22. #define dev_info(dev, ...) TRACE_INFO(__VA_ARGS__)
  23. #define dev_warn(dev, ...) TRACE_WARNING(__VA_ARGS__)
  24. #define dev_err(dev, ...) TRACE_ERROR(__VA_ARGS__)
  25. #define __iomem volatile
  26. #define unlikely(x) (x)
  27. #define likely(x) (x)
  28. #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
  29. #define DIV_ROUND_UP_ULL(ll, d) (((unsigned long long)(ll) + (d) - 1) / (d))
  30. #define BITS_PER_BYTE 8
  31. #define BITS_PER_LONG 32
  32. #define BIT(nr) (1UL << (nr))
  33. #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  34. #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
  35. #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
  36. #define min(x,y) ((x)<(y)?(x):(y))
  37. /*
  38. * Create a contiguous bitmask starting at bit position @l and ending at
  39. * position @h. */
  40. #define GENMASK(h, l) \
  41. (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
  42. #define VOID void
  43. #define BOOL int
  44. #define TRUE 1
  45. #define FALSE 0
  46. #define FAR
  47. #define NEAR
  48. typedef unsigned long long u64;
  49. typedef uint32_t UINT32;
  50. typedef uint16_t UINT16;
  51. typedef uint8_t UINT8;
  52. typedef int32_t INT32;
  53. typedef int16_t INT16;
  54. typedef int8_t INT8;
  55. typedef char CHAR;
  56. typedef uint32_t u32;
  57. typedef uint16_t u16;
  58. typedef uint8_t u8;
  59. typedef int32_t s32;
  60. typedef int16_t s16;
  61. typedef int8_t s8;
  62. //typedef s32 ssize_t;
  63. typedef u32 __le32;
  64. typedef UINT32 AARCHPTR;
  65. typedef u32 dma_addr_t;
  66. typedef u32 phys_addr_t;
  67. typedef phys_addr_t resource_size_t;
  68. typedef unsigned gfp_t;
  69. #define le16_to_cpu(x) (x)
  70. #define le32_to_cpu(x) (x)
  71. #define cpu_to_le32(x) (x)
  72. #define ___constant_swab32(x) ((u32)( \
  73. (((u32)(x) & (u32)0x000000ffUL) << 24) | \
  74. (((u32)(x) & (u32)0x0000ff00UL) << 8) | \
  75. (((u32)(x) & (u32)0x00ff0000UL) >> 8) | \
  76. (((u32)(x) & (u32)0xff000000UL) >> 24)))
  77. #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
  78. #define DECLARE_BITMAP(name,bits) \
  79. unsigned long name[BITS_TO_LONGS(bits)]
  80. typedef struct {
  81. int counter;
  82. } atomic_t;
  83. typedef struct refcount_struct {
  84. atomic_t refs;
  85. } refcount_t;
  86. #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
  87. struct kref {
  88. refcount_t refcount;
  89. };
  90. #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
  91. #define small_const_nbits(nbits) \
  92. (nbits && (nbits) <= BITS_PER_LONG)
  93. #define reg8_read(addr) *((volatile uint8_t *)(addr))
  94. #define reg16_read(addr) *((volatile uint16_t *)(addr))
  95. #define reg32_read(addr) *((volatile uint32_t *)(addr))
  96. #define reg8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
  97. #define reg16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
  98. #define reg32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
  99. #define mem8_read(addr) *((volatile uint8_t *)(addr))
  100. #define mem16_read(addr) *((volatile uint16_t *)(addr))
  101. #define mem32_read(addr) *((volatile uint32_t *)(addr))
  102. #define mem8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
  103. #define mem16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
  104. #define mem32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
  105. #define readb(a) reg8_read(a)
  106. #define readw(a) reg16_read(a)
  107. #define readl(a) reg32_read(a)
  108. #define writeb(v, a) reg8_write(a, v)
  109. #define writew(v, a) reg16_write(a, v)
  110. #define writel(v, a) reg32_write(a, v)
  111. static __INLINE void memset_s(void *dest, size_t destMax, int c, size_t count)
  112. {
  113. memset(dest, c, destMax);
  114. }
  115. static __INLINE void bitmap_zero(unsigned long *dst, unsigned int nbits)
  116. {
  117. if (small_const_nbits(nbits))
  118. *dst = 0UL;
  119. else {
  120. unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
  121. memset(dst, 0, len);
  122. }
  123. }
  124. /**
  125. * fls - find last (most-significant) bit set
  126. * @x: the word to search
  127. *
  128. * This is defined the same way as ffs.
  129. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  130. */
  131. static __INLINE int fls(u32 x)
  132. {
  133. int r = 32;
  134. if (!x)
  135. return 0;
  136. if (!(x & 0xffff0000u)) {
  137. x <<= 16;
  138. r -= 16;
  139. }
  140. if (!(x & 0xff000000u)) {
  141. x <<= 8;
  142. r -= 8;
  143. }
  144. if (!(x & 0xf0000000u)) {
  145. x <<= 4;
  146. r -= 4;
  147. }
  148. if (!(x & 0xc0000000u)) {
  149. x <<= 2;
  150. r -= 2;
  151. }
  152. if (!(x & 0x80000000u)) {
  153. x <<= 1;
  154. r -= 1;
  155. }
  156. return r;
  157. }
  158. /*
  159. * __fls() returns the bit position of the last bit set, where the
  160. * LSB is 0 and MSB is 31. Zero input is undefined.
  161. */
  162. static __INLINE unsigned long __fls(unsigned long x)
  163. {
  164. return fls(x) - 1;
  165. }
  166. /*
  167. * ffs() returns zero if the input was zero, otherwise returns the bit
  168. * position of the first set bit, where the LSB is 1 and MSB is 32.
  169. */
  170. static __INLINE int ffs(int x)
  171. {
  172. return fls(x & -x);
  173. }
  174. /*
  175. * __ffs() returns the bit position of the first bit set, where the
  176. * LSB is 0 and MSB is 31. Zero input is undefined.
  177. */
  178. static __INLINE unsigned long __ffs(unsigned long x)
  179. {
  180. return ffs(x) - 1;
  181. }
  182. #define MAX_ERRNO 4095
  183. #define IS_ERR_VALUE(x) (x) >= (unsigned long)-MAX_ERRNO
  184. static __INLINE void *ERR_PTR(long error)
  185. {
  186. return (void *) error;
  187. }
  188. static __INLINE long PTR_ERR(const void *ptr)
  189. {
  190. return (long) ptr;
  191. }
  192. static __INLINE long IS_ERR(const void *ptr)
  193. {
  194. return IS_ERR_VALUE((unsigned long)ptr);
  195. }
  196. static __INLINE int IS_ERR_OR_NULL(const void *ptr)
  197. {
  198. return !ptr || IS_ERR_VALUE((unsigned long)ptr);
  199. }
  200. static __INLINE void set_bit(int nr, volatile unsigned long *addr)
  201. {
  202. unsigned long mask = BIT_MASK(nr);
  203. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  204. *p |= mask;
  205. }
  206. static __INLINE void clear_bit(int nr, volatile unsigned long *addr)
  207. {
  208. unsigned long mask = BIT_MASK(nr);
  209. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  210. *p &= ~mask;
  211. }
  212. /**
  213. * test_bit - Determine whether a bit is set
  214. * @nr: bit number to test
  215. * @addr: Address to start counting from
  216. */
  217. static __INLINE int test_bit(int nr, const volatile unsigned long *addr)
  218. {
  219. return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
  220. }
  221. /**
  222. * test_and_set_bit - Set a bit and return its old value
  223. * @nr: Bit to set
  224. * @addr: Address to count from
  225. *
  226. * This operation is atomic and cannot be reordered.
  227. * It may be reordered on other architectures than x86.
  228. * It also implies a memory barrier.
  229. */
  230. static __INLINE int test_and_set_bit(int nr, volatile unsigned long *addr)
  231. {
  232. unsigned long mask = BIT_MASK(nr);
  233. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  234. unsigned long old;
  235. old = *p;
  236. *p = old | mask;
  237. return (old & mask) != 0;
  238. }
  239. static __INLINE int test_and_clear_bit(int nr, volatile unsigned long *addr)
  240. {
  241. unsigned long mask = BIT_MASK(nr);
  242. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  243. unsigned long old;
  244. old = *p;
  245. *p = old & ~mask;
  246. return (old & mask) != 0;
  247. }
/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static __INLINE void list_splice_init(List_t *list, List_t *head)
{
	/* pxIndex is FreeRTOS's end-of-list marker; splicing in front of it
	   appends @list's items at the tail of @head. */
	ListItem_t *pxIndex = head->pxIndex;
	/* First and last real items of the donor list (its own pxIndex end
	   marker is skipped). */
	ListItem_t *first = list->pxIndex->pxNext;
	ListItem_t *last = list->pxIndex->pxPrevious;
	if (!listLIST_IS_EMPTY(list)) {
		/* Link the donor chain between @head's current tail and pxIndex. */
		first->pxPrevious = pxIndex->pxPrevious;
		last->pxNext = pxIndex;
		pxIndex->pxPrevious->pxNext = first;
		pxIndex->pxPrevious = last;
		head->uxNumberOfItems += list->uxNumberOfItems;
		/* Donor list becomes empty and immediately reusable. */
		vListInitialise(list);
	}
	/* NOTE(review): spliced items still carry their old pxContainer;
	   list_move() below relies on pxContainer for removal bookkeeping --
	   confirm callers re-home items or never remove them directly. */
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 *
 * NOTE(review): actual parameters are (item, list, pos) -- @item is
 * removed from whatever list currently holds it and re-inserted into
 * @list immediately after @pos.
 */
static __INLINE void list_move(ListItem_t *item, List_t *list, ListItem_t *pos)
{
	/* uxListRemove() may disturb the item's bookkeeping; preserve the
	   owner pointer across the removal. */
	void *pvOwner = item->pvOwner;
	uxListRemove(item);
	item->pvOwner = pvOwner;
	/* Splice @item in right after @pos. */
	item->pxNext = pos->pxNext;
	item->pxNext->pxPrevious = item;
	item->pxPrevious = pos;
	pos->pxNext = item;
	/* Remember which list the item is in. This allows fast removal of the
	item later. */
	item->pxContainer = list;
	list->uxNumberOfItems++;
}
/*
 * Iterate over a FreeRTOS list, exposing each item and its owner.
 * NOTE(review): pvOwner is also evaluated on the end marker (at loop
 * start when the list is empty, and on the final increment); the end
 * marker is a MiniListItem_t with no owner field -- confirm that read
 * is benign on this port and never rely on pvOwner after the loop.
 */
#define list_for_each_entry(pxListItem, pvOwner, list) \
	for (pxListItem = listGET_HEAD_ENTRY(list), \
	pvOwner = listGET_LIST_ITEM_OWNER(pxListItem); \
	pxListItem != listGET_END_MARKER(list); \
	pxListItem = listGET_NEXT(pxListItem), \
	pvOwner = listGET_LIST_ITEM_OWNER(pxListItem))
/* Owner (enclosing object) of a list item / of the first item in a list. */
#define list_entry(pxListItem) listGET_LIST_ITEM_OWNER(pxListItem)
#define list_first_entry(pxList) listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(pxList))
/* Scatter-gather iterator flags (Linux lib/scatterlist.h-compatible values). */
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
/* Heap allocator prototypes; NOTE(review): presumably implemented by the
   platform heap port -- signatures match <stdlib.h>, definitions live
   elsewhere. */
void *malloc(size_t size);
void free(void *ptr);
void *realloc(void *ptr, size_t size);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* _OS_ADAPT_H */