/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
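
/*
 * Illustrative usage (not part of the original header): a simple shared
 * counter exercised with atomic_set()/atomic_read(). The names below are
 * hypothetical; READ_ONCE()/WRITE_ONCE() keep the compiler from tearing or
 * caching the accesses, while the comment above explains why a plain store
 * is safe for atomic_set() here.
 *
 *      static atomic_t example_events = ATOMIC_INIT(0);
 *
 *      void example_reset(void)
 *      {
 *              atomic_set(&example_events, 0);
 *      }
 *
 *      int example_pending(void)
 *      {
 *              return atomic_read(&example_events);
 *      }
 */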

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result, val; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1: ldrex %0, [%4]\n" \
" " #asm_op " %1, %0, %5\n" \
" strex %2, %1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "Ir" (i) \
        : "cc"); \
\
        return result; \
}
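
/*
 * For reference (not part of the original header): ATOMIC_OP(add, +=, add)
 * expands to roughly the function sketched below. ldrex marks the word for
 * exclusive access, strex stores only if the exclusive monitor is still
 * held, and the teq/bne pair retries the whole read-modify-write if another
 * CPU or an exception intervened.
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long tmp;
 *              int result;
 *
 *              prefetchw(&v->counter);
 *              __asm__ __volatile__("@ atomic_add\n"
 *      "1: ldrex %0, [%3]\n"
 *      " add %0, %0, %4\n"
 *      " strex %1, %0, [%3]\n"
 *      " teq %1, #0\n"
 *      " bne 1b"
 *              : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *              : "r" (&v->counter), "Ir" (i)
 *              : "cc");
 *      }
 */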

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex %1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "Ir" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
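
/*
 * Illustrative usage (not part of the original header): the usual
 * compare-and-swap retry loop built on top of atomic_cmpxchg(). The helper
 * name is hypothetical; the fully ordered atomic_cmpxchg() wrapper is
 * constructed from this _relaxed variant by the generic atomic headers.
 *
 *      static inline void example_set_flags(atomic_t *v, int mask)
 *      {
 *              int old, new;
 *
 *              do {
 *                      old = atomic_read(v);
 *                      new = old | mask;
 *              } while (atomic_cmpxchg(v, old, new) != old);
 *      }
 */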

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
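
/*
 * Illustrative usage (not part of the original header): the classic
 * "increment unless zero" pattern for reference counts that must not be
 * revived after dropping to zero. The helper name is hypothetical. Note the
 * barrier placement above: the trailing smp_mb() is skipped when the value
 * already matched @u and nothing was modified.
 *
 *      static inline bool example_get_ref(atomic_t *refs)
 *      {
 *              // Add 1 unless the counter is 0; returns the old value.
 *              return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *      }
 */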

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
\
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
\
        raw_local_irq_save(flags); \
        v->counter c_op i; \
        val = v->counter; \
        raw_local_irq_restore(flags); \
\
        return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        unsigned long flags; \
        int val; \
\
        raw_local_irq_save(flags); \
        val = v->counter; \
        v->counter c_op i; \
        raw_local_irq_restore(flags); \
\
        return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

#define atomic_fetch_andnot atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_OP_RETURN(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
        ATOMIC_OP(op, c_op, asm_op) \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
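
/*
 * For reference (not part of the original header): the ATOMIC_OPS()
 * expansions above generate, on ARMv6 and later, approximately this API:
 *
 *      atomic_add(), atomic_sub(), atomic_and(), atomic_andnot(),
 *      atomic_or(), atomic_xor()
 *      atomic_add_return_relaxed(), atomic_sub_return_relaxed()
 *      atomic_fetch_add_relaxed(), atomic_fetch_sub_relaxed(),
 *      atomic_fetch_and_relaxed(), atomic_fetch_andnot_relaxed(),
 *      atomic_fetch_or_relaxed(), atomic_fetch_xor_relaxed()
 *
 * On pre-ARMv6 (UP only) the same macros generate the fully ordered names
 * instead (atomic_add_return(), atomic_fetch_add(), ...), implemented by
 * briefly disabling interrupts. The generic atomic headers then fill in the
 * remaining ordering variants from whichever set is provided.
 */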

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif
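
/*
 * Illustrative note (not part of the original header): with CONFIG_ARM_LPAE
 * a naturally aligned ldrd/strd is expected to be single-copy atomic, so the
 * plain doubleword accesses above suffice; without LPAE the ldrexd/strexd
 * loop is needed so a 64-bit store cannot be observed half-written. A
 * minimal usage sketch with hypothetical names:
 *
 *      static atomic64_t example_bytes = ATOMIC64_INIT(0);
 *
 *      void example_clear(void)
 *      {
 *              atomic64_set(&example_bytes, 0);
 *      }
 *
 *      long long example_total(void)
 *      {
 *              return atomic64_read(&example_bytes);
 *      }
 */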

#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
        __asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
        long long result; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
{ \
        long long result, val; \
        unsigned long tmp; \
\
        prefetchw(&v->counter); \
\
        __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1: ldrexd %0, %H0, [%4]\n" \
" " #op1 " %Q1, %Q0, %Q5\n" \
" " #op2 " %R1, %R0, %R5\n" \
" strexd %2, %1, %H1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
        : "r" (&v->counter), "r" (i) \
        : "cc"); \
\
        return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_OP_RETURN(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
        ATOMIC64_OP(op, op1, op2) \
        ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd %1, %H1, [%3]\n"
                "mov %0, #0\n"
                "teq %1, %4\n"
                "teqeq %H1, %H4\n"
                "strexdeq %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
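
/*
 * Illustrative usage (not part of the original header): taking one item from
 * a 64-bit count of available resources without letting it go negative. The
 * helper name is hypothetical; atomic64_dec_if_positive() returns the
 * decremented value, which is only stored back when it is non-negative.
 *
 *      static inline bool example_take_one(atomic64_t *avail)
 *      {
 *              return atomic64_dec_if_positive(avail) >= 0;
 *      }
 */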

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
                                                  long long u)
{
        long long oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" beq 2f\n"
" adds %Q1, %Q0, %Q6\n"
" adc %R1, %R0, %R6\n"
" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */