/*
 * os_adapt.h - OS adaptation layer: Linux-kernel-style helper macros,
 * bit operations and list utilities mapped onto FreeRTOS primitives.
 */
  1. #ifndef _OS_ADAPT_H
  2. #define _OS_ADAPT_H
  3. #include <string.h>
  4. #include "FreeRTOS.h"
  5. #include "list.h"
  6. #ifdef __cplusplus
  7. extern "C" {
  8. #endif
  9. #define __INLINE inline
  10. #define PAGE_SIZE 4096
  11. #define ARCH_DMA_MINALIGN 64
  12. #define USEC_PER_MSEC 1000
  13. #define MSEC_PER_SEC 1000
  14. #define BUG() printf("bug on %s %d.\n", __func__, __LINE__);
  15. #define BUG_ON(condition) if (condition) BUG()
  16. #define WARN_ON(condition) if (condition) BUG()
  17. #define barrier()
  18. #define wmb()
  19. #define EXPORT_SYMBOL(x)
  20. #define dev_dbg(dev, ...) TRACE_DEBUG(__VA_ARGS__)
  21. #define dev_vdbg dev_dbg
  22. #define dev_info(dev, ...) TRACE_INFO(__VA_ARGS__)
  23. #define dev_warn(dev, ...) TRACE_WARNING(__VA_ARGS__)
  24. #define dev_err(dev, ...) TRACE_ERROR(__VA_ARGS__)
  25. #define __iomem volatile
  26. #define unlikely(x) (x)
  27. #define likely(x) (x)
  28. #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
  29. #define DIV_ROUND_UP_ULL(ll, d) (((unsigned long long)(ll) + (d) - 1) / (d))
  30. #define BITS_PER_BYTE 8
  31. #define BITS_PER_LONG 32
  32. #ifndef BIT
  33. #define BIT(nr) (1UL << (nr))
  34. #endif
  35. #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  36. #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
  37. #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
  38. #define min(x,y) ((x)<(y)?(x):(y))
  39. #ifndef assert
  40. #define assert(...)
  41. #endif
  42. /*
  43. * Create a contiguous bitmask starting at bit position @l and ending at
  44. * position @h. */
  45. #define GENMASK(h, l) \
  46. (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
  47. #define VOID void
  48. #define BOOL int
  49. #define TRUE 1
  50. #define FALSE 0
  51. #define FAR
  52. #define NEAR
  53. typedef unsigned long long u64;
  54. typedef uint32_t UINT32;
  55. typedef uint16_t UINT16;
  56. typedef uint8_t UINT8;
  57. typedef int32_t INT32;
  58. typedef int16_t INT16;
  59. typedef int8_t INT8;
  60. typedef char CHAR;
  61. typedef uint32_t u32;
  62. typedef uint16_t u16;
  63. typedef uint8_t u8;
  64. typedef int32_t s32;
  65. typedef int16_t s16;
  66. typedef int8_t s8;
  67. //typedef s32 ssize_t;
  68. typedef u32 __le32;
  69. typedef UINT32 AARCHPTR;
  70. typedef u32 dma_addr_t;
  71. typedef u32 phys_addr_t;
  72. typedef phys_addr_t resource_size_t;
  73. typedef unsigned gfp_t;
  74. #define le16_to_cpu(x) (x)
  75. #define le32_to_cpu(x) (x)
  76. #define cpu_to_le32(x) (x)
  77. #define ___constant_swab32(x) ((u32)( \
  78. (((u32)(x) & (u32)0x000000ffUL) << 24) | \
  79. (((u32)(x) & (u32)0x0000ff00UL) << 8) | \
  80. (((u32)(x) & (u32)0x00ff0000UL) >> 8) | \
  81. (((u32)(x) & (u32)0xff000000UL) >> 24)))
  82. #ifndef ARRAY_SIZE
  83. #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
  84. #endif
  85. #define DECLARE_BITMAP(name,bits) \
  86. unsigned long name[BITS_TO_LONGS(bits)]
  87. typedef struct {
  88. int counter;
  89. } atomic_t;
  90. typedef struct refcount_struct {
  91. atomic_t refs;
  92. } refcount_t;
  93. #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
  94. struct kref {
  95. refcount_t refcount;
  96. };
  97. struct timeval {
  98. long tv_sec;
  99. long tv_usec;
  100. };
  101. #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), }
  102. #define small_const_nbits(nbits) \
  103. (nbits && (nbits) <= BITS_PER_LONG)
  104. #define reg8_read(addr) *((volatile uint8_t *)(addr))
  105. #define reg16_read(addr) *((volatile uint16_t *)(addr))
  106. #define reg32_read(addr) *((volatile uint32_t *)(addr))
  107. #define reg8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
  108. #define reg16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
  109. #define reg32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
  110. #define mem8_read(addr) *((volatile uint8_t *)(addr))
  111. #define mem16_read(addr) *((volatile uint16_t *)(addr))
  112. #define mem32_read(addr) *((volatile uint32_t *)(addr))
  113. #define mem8_write(addr,val) *((volatile uint8_t *)(addr)) = (val)
  114. #define mem16_write(addr,val) *((volatile uint16_t *)(addr)) = (val)
  115. #define mem32_write(addr,val) *((volatile uint32_t *)(addr)) = (val)
  116. #define readb(a) reg8_read(a)
  117. #define readw(a) reg16_read(a)
  118. #define readl(a) reg32_read(a)
  119. #define writeb(v, a) reg8_write(a, v)
  120. #define writew(v, a) reg16_write(a, v)
  121. #define writel(v, a) reg32_write(a, v)
  122. static __INLINE void memset_s(void *dest, size_t destMax, int c, size_t count)
  123. {
  124. memset(dest, c, destMax);
  125. }
  126. static __INLINE void bitmap_zero(unsigned long *dst, unsigned int nbits)
  127. {
  128. if (small_const_nbits(nbits))
  129. *dst = 0UL;
  130. else {
  131. unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
  132. memset(dst, 0, len);
  133. }
  134. }
  135. /**
  136. * fls - find last (most-significant) bit set
  137. * @x: the word to search
  138. *
  139. * This is defined the same way as ffs.
  140. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  141. */
  142. static __INLINE int fls(u32 x)
  143. {
  144. int r = 32;
  145. if (!x)
  146. return 0;
  147. if (!(x & 0xffff0000u)) {
  148. x <<= 16;
  149. r -= 16;
  150. }
  151. if (!(x & 0xff000000u)) {
  152. x <<= 8;
  153. r -= 8;
  154. }
  155. if (!(x & 0xf0000000u)) {
  156. x <<= 4;
  157. r -= 4;
  158. }
  159. if (!(x & 0xc0000000u)) {
  160. x <<= 2;
  161. r -= 2;
  162. }
  163. if (!(x & 0x80000000u)) {
  164. x <<= 1;
  165. r -= 1;
  166. }
  167. return r;
  168. }
  169. /*
  170. * __fls() returns the bit position of the last bit set, where the
  171. * LSB is 0 and MSB is 31. Zero input is undefined.
  172. */
  173. static __INLINE unsigned long __fls(unsigned long x)
  174. {
  175. return fls(x) - 1;
  176. }
  177. /*
  178. * ffs() returns zero if the input was zero, otherwise returns the bit
  179. * position of the first set bit, where the LSB is 1 and MSB is 32.
  180. */
  181. static __INLINE int ffs(int x)
  182. {
  183. return fls(x & -x);
  184. }
  185. /*
  186. * __ffs() returns the bit position of the first bit set, where the
  187. * LSB is 0 and MSB is 31. Zero input is undefined.
  188. */
  189. static __INLINE unsigned long __ffs(unsigned long x)
  190. {
  191. return ffs(x) - 1;
  192. }
  193. #define MAX_ERRNO 4095
  194. #define IS_ERR_VALUE(x) (x) >= (unsigned long)-MAX_ERRNO
/* Linux-style error-pointer helpers: a small negative errno value is
 * carried directly in a pointer's bit pattern, so a single return value
 * can be either a valid pointer or an error code. */

/* Encode @error (a negative errno) as a pointer. */
static __INLINE void *ERR_PTR(long error)
{
    return (void *) error;
}

/* Recover the errno value previously packed with ERR_PTR(). */
static __INLINE long PTR_ERR(const void *ptr)
{
    return (long) ptr;
}

/* Non-zero when @ptr is an encoded error value (top MAX_ERRNO page). */
static __INLINE long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

/* Non-zero when @ptr is NULL or an encoded error value. */
static __INLINE int IS_ERR_OR_NULL(const void *ptr)
{
    return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
  211. static __INLINE void set_bit(int nr, volatile unsigned long *addr)
  212. {
  213. unsigned long mask = BIT_MASK(nr);
  214. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  215. *p |= mask;
  216. }
  217. static __INLINE void clear_bit(int nr, volatile unsigned long *addr)
  218. {
  219. unsigned long mask = BIT_MASK(nr);
  220. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  221. *p &= ~mask;
  222. }
  223. /**
  224. * test_bit - Determine whether a bit is set
  225. * @nr: bit number to test
  226. * @addr: Address to start counting from
  227. */
  228. static __INLINE int test_bit(int nr, const volatile unsigned long *addr)
  229. {
  230. return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
  231. }
  232. /**
  233. * test_and_set_bit - Set a bit and return its old value
  234. * @nr: Bit to set
  235. * @addr: Address to count from
  236. *
  237. * This operation is atomic and cannot be reordered.
  238. * It may be reordered on other architectures than x86.
  239. * It also implies a memory barrier.
  240. */
  241. static __INLINE int test_and_set_bit(int nr, volatile unsigned long *addr)
  242. {
  243. unsigned long mask = BIT_MASK(nr);
  244. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  245. unsigned long old;
  246. old = *p;
  247. *p = old | mask;
  248. return (old & mask) != 0;
  249. }
  250. static __INLINE int test_and_clear_bit(int nr, volatile unsigned long *addr)
  251. {
  252. unsigned long mask = BIT_MASK(nr);
  253. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  254. unsigned long old;
  255. old = *p;
  256. *p = old & ~mask;
  257. return (old & mask) != 0;
  258. }
/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static __INLINE void list_splice_init(List_t *list, List_t *head)
{
    /* FreeRTOS lists are circular; pxIndex serves as the end
     * marker/insertion point (see listGET_END_MARKER usage below). */
    ListItem_t *pxIndex = head->pxIndex;
    ListItem_t *first = list->pxIndex->pxNext;
    ListItem_t *last = list->pxIndex->pxPrevious;
    if (!listLIST_IS_EMPTY(list)) {
        /* Link the donor list's chain [first..last] in immediately
         * before head's pxIndex marker. */
        first->pxPrevious = pxIndex->pxPrevious;
        last->pxNext = pxIndex;
        pxIndex->pxPrevious->pxNext = first;
        pxIndex->pxPrevious = last;
        head->uxNumberOfItems += list->uxNumberOfItems;
        /* NOTE(review): spliced items keep their old pxContainer; unlike
         * list_move() below, nothing retargets it to @head - verify
         * callers never uxListRemove() a spliced item afterwards. */
        /* Leave the donor list empty but valid for reuse. */
        vListInitialise(list);
    }
}
/**
 * list_move - remove an item from whatever list it is on and insert it
 * into another list at a given position.
 * @item: the entry to move
 * @list: the destination list (container bookkeeping is updated)
 * @pos:  the list item that will directly precede @item after the move
 */
static __INLINE void list_move(ListItem_t *item, List_t *list, ListItem_t *pos)
{
    /* Preserve the owner across removal; the save/restore implies this
     * port's uxListRemove() may disturb pvOwner - TODO confirm. */
    void *pvOwner = item->pvOwner;
    uxListRemove(item);
    item->pvOwner = pvOwner;
    /* Splice @item in between @pos and pos->pxNext. */
    item->pxNext = pos->pxNext;
    item->pxNext->pxPrevious = item;
    item->pxPrevious = pos;
    pos->pxNext = item;
    /* Remember which list the item is in. This allows fast removal of the
    item later. */
    item->pxContainer = list;
    list->uxNumberOfItems++;
}
  299. #define list_for_each_entry(pxListItem, pvOwner, list) \
  300. for (pxListItem = listGET_HEAD_ENTRY(list), \
  301. pvOwner = listGET_LIST_ITEM_OWNER(pxListItem); \
  302. pxListItem != listGET_END_MARKER(list); \
  303. pxListItem = listGET_NEXT(pxListItem), \
  304. pvOwner = listGET_LIST_ITEM_OWNER(pxListItem))
  305. #define list_entry(pxListItem) listGET_LIST_ITEM_OWNER(pxListItem)
  306. #define list_first_entry(pxList) listGET_LIST_ITEM_OWNER(listGET_HEAD_ENTRY(pxList))
  307. #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
  308. #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
  309. #define SG_MITER_FROM_SG (1 << 2) /* nop */
  310. void *malloc(size_t size);
  311. void free(void *ptr);
  312. void *realloc(void *ptr, size_t size);
  313. #ifdef __cplusplus
  314. }
  315. #endif
  316. #endif