/*
 * usb_os_adapter.h
 *
 * Linux -> RTOS (FreeRTOS) compatibility shims for the USB stack:
 * fixed-width types, list operations, spinlocks, allocator wrappers
 * and assorted kernel-style macros.
 */
#ifndef _USB_OS_ADAPTER_H
#define _USB_OS_ADAPTER_H

#include "os_adapt.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Map Linux-style diagnostics onto the local trace layer
 * (TRACE_ERROR/WARN_ON are provided by os_adapt.h). */
#define pr_err(...) TRACE_ERROR(__VA_ARGS__)
#define WARN_ON_ONCE(condition) WARN_ON(condition)
/* Silence "unused variable" warnings for intentionally ignored values. */
#define USB_UNUSED(x) ((void) x)
/* Linux kernel fixed-width integer aliases, mapped onto the platform's
 * INT8/UINT8-style types from os_adapt.h. */
typedef INT8 __s8;
typedef UINT8 __u8;
typedef INT16 __s16;
typedef UINT16 __u16;
typedef INT32 __s32;
typedef UINT32 __u32;
typedef long long __s64;
typedef unsigned long long __u64;
/* Endianness-annotated wire types. This port assumes a little-endian
 * CPU, so the little-endian variants are plain integers (no swapping);
 * TODO confirm the target is in fact little-endian. */
typedef __u16 __le16;
typedef __u16 __be16;
//typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;
typedef unsigned long ulong;
typedef __u32 dev_t;
/* CPU <-> little-endian conversions are the identity on this port. */
#define __cpu_to_le16(x) (x)
#define cpu_to_le16(x) (x)
/* le16_to_cpu is presumably supplied by os_adapt.h — TODO confirm. */
#define __le16_to_cpu le16_to_cpu
  29. #define get_unaligned(x) (*x)
/* MMIO string accessors: copy @count 32-bit words between @buffer and
 * the fixed register at @addr. (The u32 type is presumably declared in
 * os_adapt.h — TODO confirm.) */
void iowrite32_rep(u32 addr, const void *buffer, unsigned int count);
void ioread32_rep(u32 addr, void *buffer, unsigned int count);
/* Linux-style bitmap helpers; implementations live elsewhere
 * (presumably the adapter .c file).
 * bitmap_find_next_zero_area: find @nr consecutive zero bits in @map
 * (of @size bits), starting at @start, aligned per @align_mask. */
unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                         unsigned long size,
                                         unsigned long start,
                                         unsigned int nr,
                                         unsigned long align_mask);
void bitmap_set(unsigned long *map, unsigned int start, int len);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
/**
 * list_move_tail - delete from one list and add as another's tail
 * @item: the list item to move (may or may not currently be on a list)
 * @list: the destination list; @item is appended at its end
 *
 * FreeRTOS port of the Linux helper: @item is removed first only if it
 * is currently contained in some list (listIS_CONTAINED_WITHIN(NULL, ...)
 * is true when the item's container is NULL, i.e. detached).
 */
static inline void list_move_tail(ListItem_t *item, List_t *list)
{
    if (!listIS_CONTAINED_WITHIN(NULL, item))
        uxListRemove(item);
    vListInsertEnd(list, item);
}
/*
 * list_del_init - remove @item from whatever list it is on, if any.
 *
 * Safe to call on an item that was already removed. Unlike the Linux
 * original, the item is not re-initialised here — presumably because
 * uxListRemove() already clears the item's container pointer, making
 * the commented-out re-init redundant; TODO confirm against the
 * FreeRTOS list implementation in use.
 */
static inline void list_del_init(ListItem_t *item)
{
    if (!listIS_CONTAINED_WITHIN(NULL, item)) /* item may already be off-list */
        uxListRemove(item);
    //vListInitialiseItem(item);
}
/**
 * list_add_tail - add a new entry at the tail of a list
 * @item: new list item to be added
 * @list: list to append it to
 *
 * Insert a new entry before the list's end marker.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(ListItem_t *item, List_t *list)
{
    vListInsertEnd(list, item);
}
/* Initialise @list as an empty FreeRTOS list (wraps vListInitialise). */
static inline void INIT_LIST_HEAD(List_t *list)
{
    vListInitialise(list);
}
/* Initialise a detached list item (wraps vListInitialiseItem). */
static inline void INIT_LIST_ITEM(ListItem_t *item)
{
    vListInitialiseItem(item);
}
/*
 * Removal-safe list iteration (Linux list_for_each_entry_safe analogue,
 * expressed with FreeRTOS list accessors). @nListItem caches the next
 * item before the loop body runs, so the body may delete @pxListItem.
 * @pvOwner receives the owner (the Linux "entry") of the current item.
 * NOTE(review): owner/next are also fetched when the cursor reaches the
 * end marker, just before the condition terminates the loop — those
 * values are never used by a well-formed body, but confirm reading the
 * end marker is benign in this FreeRTOS configuration.
 */
#define list_for_each_entry_safe(pxListItem, nListItem, pvOwner, list) \
	for (pxListItem = listGET_HEAD_ENTRY(list), \
	     nListItem = listGET_NEXT(pxListItem), \
	     pvOwner = listGET_LIST_ITEM_OWNER(pxListItem); \
	     pxListItem != listGET_END_MARKER(list); \
	     pxListItem = nListItem, \
	     nListItem = listGET_NEXT(pxListItem), \
	     pvOwner = listGET_LIST_ITEM_OWNER(pxListItem))
/* Same as above, but without fetching the owner. */
#define list_for_each_safe(pxListItem, nListItem, list) \
	for (pxListItem = listGET_HEAD_ENTRY(list), \
	     nListItem = listGET_NEXT(pxListItem); \
	     pxListItem != listGET_END_MARKER(list); \
	     pxListItem = nListItem, \
	     nListItem = listGET_NEXT(pxListItem))
/* Thin FreeRTOS wrappers for common Linux list operations/predicates. */
#define list_del(pxListItem) uxListRemove(pxListItem)
#define list_empty(pxList) listLIST_IS_EMPTY(pxList)
/* An item with no container is not on any list. */
#define list_item_empty(pxListItem) ((pxListItem)->pxContainer == NULL)
/*
 * Kconfig-style feature test machinery for IS_ENABLED() below.
 * Pasting the macro's value onto __ARG_PLACEHOLDER_ produces "0," only
 * when the value is exactly 1, which shifts an extra argument into the
 * "val" slot of ___config_enabled(); any other (or undefined) value
 * leaves 0 in that slot.
 */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val
/*
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm'
 * (i.e. #defined to 1 or CONFIG_FOO_MODULE defined to 1), 0 otherwise.
 */
#define IS_ENABLED(option) \
	(config_enabled(option) || config_enabled(option##_MODULE))
/*
 * Stubs for Linux kernel facilities this port does not use: timers,
 * kthreads, the freezer, deferred work. The structs exist only so that
 * embedding code still compiles; the macros compile the calls out.
 */
struct timer_list
{
	int a; /* placeholder member — never used functionally */
};
struct unused {
	int a;
};
typedef struct unused unused_t;
#define task_pid_nr(x) 0
#define set_freezable(...) do { } while (0)
#define try_to_freeze(...) 0
#define set_current_state(...) do { } while (0)
#define kthread_should_stop(...) 0
#define schedule() do { } while (0)
#define setup_timer(timer, func, data) do {} while (0)
#define del_timer_sync(timer) do {} while (0)
#define schedule_work(work) do {} while (0)
#define INIT_WORK(work, fun) do {} while (0)
/* IRQ save/restore are currently no-ops; the critical-section variants
 * are kept below for reference. NOTE(review): confirm interrupts really
 * need no masking around the spinlock users on this platform. */
#define local_irq_save(flag) do {(void)flag;} while (0)
#define local_irq_restore(flag) do {(void)flag;} while (0)
//#define local_irq_save(flag) do {portENTER_CRITICAL(); (void)flag;} while(0)
//#define local_irq_restore(flag) do {portEXIT_CRITICAL(); (void)flag;} while(0)
struct work_struct {
	int a; /* placeholder member */
};
/* Descriptor for a fixed-size allocation pool; see get_mem()/kmem_cache_*. */
struct kmem_cache {
	int sz;
};
typedef int wait_queue_head_t;
/* Minimal spinlock: slock == 1 means unlocked, 0 means locked
 * (see ARCH_SPIN_LOCK_UNLOCKED and _raw_spin_* below). */
typedef struct {
	volatile unsigned int slock;
} spinlock_t;
  137. static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
  138. {
  139. //TODO
  140. unsigned volatile long y;
  141. switch(size){
  142. case 1:
  143. y = (*(char *)ptr) & 0x000000ff;
  144. *((char *)ptr) = (char)x;
  145. break;
  146. case 2:
  147. y = (*(short *)ptr) & 0x0000ffff;
  148. *((short *)ptr) = (short)x;
  149. break;
  150. default: // 4
  151. y = (*(unsigned long *)ptr) & 0xffffffff;
  152. *((unsigned long *)ptr) = x;
  153. break;
  154. }
  155. return y;
  156. }
/* slock == 1 is the unlocked state; 0 means locked. */
#define ARCH_SPIN_LOCK_UNLOCKED 1
/* Reads only the first byte of slock as a signed char — assumes a
 * little-endian layout where that byte holds the 0/1 state; TODO confirm. */
#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
/* barrier() is presumably provided by os_adapt.h — TODO confirm. */
#define arch_spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
/* Type-generic exchange: width inferred from *ptr. Not atomic (see __xchg). */
#define xchg(ptr,v) ((unsigned int)__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
#define __xg(x) ((volatile long *)(x))
/* Release @lock by storing the "unlocked" value (1); the previous
 * value returned by xchg() is deliberately discarded. */
static inline void _raw_spin_unlock(spinlock_t *lock)
{
    xchg(&lock->slock, 1);
}
  166. static inline int _raw_spin_trylock(spinlock_t *lock)
  167. {
  168. return xchg(&lock->slock, 0) != 0 ? 1 : 0;
  169. }
  170. static inline void _raw_spin_lock(spinlock_t *lock)
  171. {
  172. volatile int was_locked;
  173. do {
  174. was_locked = xchg(&lock->slock, 0) == 0 ? 1 : 0;
  175. } while(was_locked);
  176. }
#define SPINLOCK_MAGIC 0xdead4ead
#define SPIN_LOCK_UNLOCKED ARCH_SPIN_LOCK_UNLOCKED
/* Initialise to the unlocked state (slock == 1). */
#define spin_lock_init(x) do { (x)->slock = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x) arch_spin_is_locked(x)
#define spin_unlock_wait(x) arch_spin_unlock_wait(x)
/* GCC statement-expression form, mirroring the Linux _spin_trylock shape. */
#define _spin_trylock(lock) ({_raw_spin_trylock(lock) ? \
	1 : ({ 0;});})
#define _spin_lock(lock) \
do { \
	_raw_spin_lock(lock); \
} while(0)
#define _spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
} while (0)
#define spin_lock(lock) _spin_lock(lock)
#define spin_unlock(lock) _spin_unlock(lock)
  194. #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
/* irqsave/irqrestore variants: local_irq_save/restore are currently
 * no-ops on this port, so these reduce to plain lock/unlock. */
#define spin_lock_irqsave(lock, flags) \
do { \
	local_irq_save(flags); \
	spin_lock(lock); \
} while (0)
#define spin_unlock_irqrestore(lock, flags) \
do { \
	spin_unlock(lock); \
	local_irq_restore(flags); \
} while (0)
#define assert_spin_locked(lock) do {} while (0)
/* Linux IRQ handler return convention. */
#define irqreturn_t int
#define IRQ_NONE 0
#define IRQ_HANDLED 1
#define IRQ_WAKE_THREAD 2
/* GFP allocation flags: all context flags collapse to 0 on this port;
 * only __GFP_ZERO carries a distinct bit — presumably checked by the
 * allocator to zero-fill; confirm in the adapter .c file. */
#define GFP_ATOMIC ((gfp_t) 0)
#define GFP_KERNEL ((gfp_t) 0)
#define GFP_NOFS ((gfp_t) 0)
#define GFP_USER ((gfp_t) 0)
#define __GFP_NOWARN ((gfp_t) 0)
#define __GFP_ZERO ((gfp_t)0x8000u)
#define UINT_MAX (~0U)
/* Linux slab/heap API mapped onto the local allocator; implementations
 * live elsewhere (presumably the adapter .c file). */
void *kmem_cache_alloc(struct kmem_cache *obj, int flag);
void kmem_cache_free(struct kmem_cache *cachep, void *obj);
void kmem_cache_destroy(struct kmem_cache *cachep);
void *kcalloc(size_t n, size_t size, gfp_t flags);
void *kmalloc(size_t size, int flags);
void *kzalloc(size_t size, gfp_t flags);
void kfree(void* addr);
struct device;
void *devm_kzalloc(struct device *dev, size_t size, gfp_t flags);
struct kmem_cache *get_mem(int element_sz);
/* Only the element size is honoured; name/align/flags/ctor are dropped. */
#define kmem_cache_create(a, sz, c, d, e) get_mem(sz)
  228. #define min_t(type, x, y) (x < y ? x: y)
  229. #define max_t(type, x, y) (x > y ? x: y)
/* msleep degrades to a busy-wait mdelay on this port; callers spin. */
#define msleep mdelay
/* Word masks for partial bitmap words; BITS_PER_LONG is presumably
 * defined in os_adapt.h — TODO confirm. */
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
/* Rounding helpers; @y must be a power of two. */
#define __round_mask(x, y) ((unsigned long)((y)-1))
#define round_down(x, y) ((x) & ~(__round_mask((x), (y))))
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
//#define min(x,y) ((x) < (y) ? x : y)
#define max(x,y) ((x) > (y) ? x : y)
/* NOTE(review): min3() relies on a min() macro that is commented out
 * above — presumably supplied by os_adapt.h; confirm. */
#define min3(x, y, z) min(min(x, y), z)
#define max3(x, y, z) max(max(x, y), z)
/* Round @a up to a multiple of @b; @b must be a power of two. */
#define ROUND(a,b) (((a) + (b) - 1) & ~((b) - 1))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* Align @x up to @a (power of two). */
#define ALIGN(x,a) __ALIGN_MASK((x),(uintptr_t)(a)-1)
//#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
//typedef unsigned long uintptr_t;
/* Number of @pad-sized chunks covering @s bytes, and the padded size. */
#define PAD_COUNT(s, pad) (((s) - 1) / (pad) + 1)
#define PAD_SIZE(s, pad) (PAD_COUNT(s, pad) * pad)
/*
 * Declare an on-stack buffer of @size elements of @type whose start
 * address is aligned to @align bytes, with the byte length padded to a
 * multiple of @pad. Over-allocates by (align - 1) bytes, then aligns
 * the usable pointer @name within the raw storage __@name.
 */
#define ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, pad) \
	char __##name[ROUND(PAD_SIZE((size) * sizeof(type), pad), align) \
		      + (align - 1)]; \
	\
	type *name = (type *)ALIGN((uintptr_t)__##name, align)
#define ALLOC_ALIGN_BUFFER(type, name, size, align) \
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, align, 1)
/* Cache-line-aligned variants; ARCH_DMA_MINALIGN is presumably defined
 * by the platform headers — TODO confirm. */
#define ALLOC_CACHE_ALIGN_BUFFER_PAD(type, name, size, pad) \
	ALLOC_ALIGN_BUFFER_PAD(type, name, size, ARCH_DMA_MINALIGN, pad)
#define ALLOC_CACHE_ALIGN_BUFFER(type, name, size) \
	ALLOC_ALIGN_BUFFER(type, name, size, ARCH_DMA_MINALIGN)
/* Byte-swap a big-endian 32-bit value (host assumed little-endian).
 * NOTE: evaluates @x four times — avoid side effects. */
#define be32_to_cpu(x) ((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))
/* Bit-position probe: exact log2 only for power-of-two @x (for other
 * values the result is not floor(log2)). Evaluates @x five times. */
#define LOG2(x) (((x & 0xaaaaaaaa) ? 1 : 0) + ((x & 0xcccccccc) ? 2 : 0) + \
	((x & 0xf0f0f0f0) ? 4 : 0) + ((x & 0xff00ff00) ? 8 : 0) + \
	((x & 0xffff0000) ? 16 : 0))

#ifdef __cplusplus
}
#endif

#endif /* _USB_OS_ADAPTER_H */