bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>

/*
 * HP-PARISC specific bit operations.
 * For a detailed description of the functions, please refer
 * to include/asm-i386/bitops.h or the kerneldoc.
 */

#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
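
/*
 * Example (illustrative sketch, not part of the original header; "map" is
 * a hypothetical bitmap): on a 64-bit kernel SHIFT_PER_LONG is 6, so a bit
 * number nr is split into a word index (nr >> SHIFT_PER_LONG) and a bit
 * offset within that word (CHOP_SHIFTCOUNT(nr), i.e. nr & 63).  Bit 70
 * therefore lands in word 1 at offset 6:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(70, map);		// map[1] == (1UL << 6)
 */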

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on the use of volatile and __*_bit() (set/clear/change):
 * *_bit() require the use of volatile.
 * __*_bit() are "relaxed" and don't use a spinlock or volatile.
 */
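
/*
 * Usage sketch (illustrative; "flags_word" is a hypothetical bitmap word):
 * the locked forms below are for bitmaps that other CPUs or interrupt
 * handlers may modify concurrently; the __* forms (pulled in from
 * asm-generic/bitops/non-atomic.h further down) are only safe when the
 * caller already excludes all other writers, e.g. during initialization.
 *
 *	set_bit(0, &flags_word);	// atomic: serialized by
 *					// _atomic_spin_lock_irqsave()
 *	__set_bit(1, &flags_word);	// non-atomic: plain read-modify-write
 */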

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (!set)
		*addr = old | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (set)
		*addr = old & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}
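
/*
 * Example (illustrative; "busy" is a hypothetical flag word): the
 * test_and_* forms return the *previous* value of the bit, so
 * test_and_set_bit() can be used to claim a flag exactly once:
 *
 *	static unsigned long busy;
 *
 *	if (!test_and_set_bit(0, &busy)) {
 *		// this caller saw the bit clear and set it; do the work
 *		clear_bit(0, &busy);
 *	}
 */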

#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * The return value of __ffs() is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they can set the
 * PSW[N] bit.  How PSW[N] (nullify next insn) gets set is determined by
 * the "condition" field (e.g. "<>" or "TR" below) in the extr* insn.
 * Only the 1st insn and one of either the 2nd or 3rd will get executed.
 * Each set of 3 insns will get executed in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */
static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef CONFIG_64BIT
		" ldi       63,%1\n"
		" extrd,u,*<>  %0,63,32,%%r0\n"
		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
		" addi    -32,%1,%1\n"
#else
		" ldi       31,%1\n"
#endif
		" extru,<>  %0,31,16,%%r0\n"
		" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi    -16,%1,%1\n"
		" extru,<>  %0,31,8,%%r0\n"
		" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi    -8,%1,%1\n"
		" extru,<>  %0,31,4,%%r0\n"
		" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi    -4,%1,%1\n"
		" extru,<>  %0,31,2,%%r0\n"
		" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi    -2,%1,%1\n"
		" extru,=  %0,31,1,%%r0\n"	/* check last bit */
		" addi    -1,%1,%1\n"
			: "+r" (x), "=r" (ret) );
	return ret;
}
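
/*
 * Example (illustrative): __ffs() counts from bit 0 and must not be
 * called with 0:
 *
 *	__ffs(0x01) == 0
 *	__ffs(0x18) == 3		// lowest set bit of 0b11000
 *	__ffs(1UL << 63) == 63		// 64-bit kernels only
 */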

#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
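
/*
 * Example (illustrative): unlike __ffs(), ffs() is 1-based and defined
 * for 0, matching the C library semantics:
 *
 *	ffs(0)    == 0
 *	ffs(0x01) == 1
 *	ffs(0x18) == 4
 */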

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(int x)
{
	int ret;

	if (!x)
		return 0;

	__asm__(
		" ldi       1,%1\n"
		" extru,<>  %0,15,16,%%r0\n"
		" zdep,TR   %0,15,16,%0\n"	/* xxxx0000 */
		" addi      16,%1,%1\n"
		" extru,<>  %0,7,8,%%r0\n"
		" zdep,TR   %0,23,24,%0\n"	/* xx000000 */
		" addi      8,%1,%1\n"
		" extru,<>  %0,3,4,%%r0\n"
		" zdep,TR   %0,27,28,%0\n"	/* x0000000 */
		" addi      4,%1,%1\n"
		" extru,<>  %0,1,2,%%r0\n"
		" zdep,TR   %0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
		" addi      2,%1,%1\n"
		" extru,=   %0,0,1,%%r0\n"
		" addi      1,%1,%1\n"		/* if y & 8, add 1 */
			: "+r" (x), "=r" (ret) );

	return ret;
}
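
/*
 * Example (illustrative): fls() returns the 1-based position of the most
 * significant set bit, so fls(x) is also the number of bits needed to
 * represent x:
 *
 *	fls(0)          == 0
 *	fls(1)          == 1
 *	fls(0x18)       == 5
 *	fls(0x80000000) == 32
 */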

#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _PARISC_BITOPS_H */