msr-smp.c 5.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/export.h>
  3. #include <linux/preempt.h>
  4. #include <linux/smp.h>
  5. #include <linux/completion.h>
  6. #include <asm/msr.h>
  7. static void __rdmsr_on_cpu(void *info)
  8. {
  9. struct msr_info *rv = info;
  10. struct msr *reg;
  11. if (rv->msrs)
  12. reg = this_cpu_ptr(rv->msrs);
  13. else
  14. reg = &rv->reg;
  15. rdmsr(rv->msr_no, reg->l, reg->h);
  16. }
  17. static void __wrmsr_on_cpu(void *info)
  18. {
  19. struct msr_info *rv = info;
  20. struct msr *reg;
  21. if (rv->msrs)
  22. reg = this_cpu_ptr(rv->msrs);
  23. else
  24. reg = &rv->reg;
  25. wrmsr(rv->msr_no, reg->l, reg->h);
  26. }
  27. int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  28. {
  29. int err;
  30. struct msr_info rv;
  31. memset(&rv, 0, sizeof(rv));
  32. rv.msr_no = msr_no;
  33. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  34. *l = rv.reg.l;
  35. *h = rv.reg.h;
  36. return err;
  37. }
  38. EXPORT_SYMBOL(rdmsr_on_cpu);
  39. int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  40. {
  41. int err;
  42. struct msr_info rv;
  43. memset(&rv, 0, sizeof(rv));
  44. rv.msr_no = msr_no;
  45. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  46. *q = rv.reg.q;
  47. return err;
  48. }
  49. EXPORT_SYMBOL(rdmsrl_on_cpu);
  50. int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  51. {
  52. int err;
  53. struct msr_info rv;
  54. memset(&rv, 0, sizeof(rv));
  55. rv.msr_no = msr_no;
  56. rv.reg.l = l;
  57. rv.reg.h = h;
  58. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  59. return err;
  60. }
  61. EXPORT_SYMBOL(wrmsr_on_cpu);
  62. int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  63. {
  64. int err;
  65. struct msr_info rv;
  66. memset(&rv, 0, sizeof(rv));
  67. rv.msr_no = msr_no;
  68. rv.reg.q = q;
  69. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  70. return err;
  71. }
  72. EXPORT_SYMBOL(wrmsrl_on_cpu);
/*
 * Run @msr_func for MSR @msr_no on every CPU in @mask, with results/values
 * going through the per-CPU @msrs array.
 *
 * get_cpu() pins us to the current CPU; if it is in @mask the callback is
 * invoked directly, since smp_call_function_many() targets only the other
 * CPUs. wait=1 means all remote callbacks have finished before put_cpu().
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
struct msr __percpu *msrs,
void (*msr_func) (void *info))
{
struct msr_info rv;
int this_cpu;
memset(&rv, 0, sizeof(rv));
rv.msrs = msrs;
rv.msr_no = msr_no;
this_cpu = get_cpu();
if (cpumask_test_cpu(this_cpu, mask))
msr_func(&rv);
smp_call_function_many(mask, msr_func, &rv, 1);
put_cpu();
}
/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 */
/* Read MSR @msr_no on every CPU in @mask into its per-CPU @msrs slot. */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
  100. /*
  101. * wrmsr on a bunch of CPUs
  102. *
  103. * @mask: which CPUs
  104. * @msr_no: which MSR
  105. * @msrs: array of MSR values
  106. *
  107. */
/* Write each CPU's per-CPU @msrs value to MSR @msr_no on every CPU in @mask. */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
/*
 * msr_info plus a completion, so the async IPI callback used by
 * rdmsr_safe_on_cpu() can signal the waiting caller.
 */
struct msr_info_completion {
struct msr_info msr;
struct completion done;
};
/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
  119. static void __rdmsr_safe_on_cpu(void *info)
  120. {
  121. struct msr_info_completion *rv = info;
  122. rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
  123. complete(&rv->done);
  124. }
  125. static void __wrmsr_safe_on_cpu(void *info)
  126. {
  127. struct msr_info *rv = info;
  128. rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
  129. }
/*
 * Fault-safe read of MSR @msr_no on @cpu into *@l/*@h.
 *
 * Uses an async cross-call plus a completion rather than the synchronous
 * smp_call_function_single(). Returns the async-call error if the IPI
 * could not be issued, otherwise the rdmsr_safe() result from the callback.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
struct msr_info_completion rv;
call_single_data_t csd;
int err;
/* csd only records the pointer to rv; initializing rv afterwards is fine. */
INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);
memset(&rv, 0, sizeof(rv));
init_completion(&rv.done);
rv.msr.msr_no = msr_no;
err = smp_call_function_single_async(cpu, &csd);
if (!err) {
/* Callback sets rv.msr.err/reg before complete(), so both are valid here. */
wait_for_completion(&rv.done);
err = rv.msr.err;
}
/* On IPI failure these are the zeroes from the memset above. */
*l = rv.msr.reg.l;
*h = rv.msr.reg.h;
return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
  149. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  150. {
  151. int err;
  152. struct msr_info rv;
  153. memset(&rv, 0, sizeof(rv));
  154. rv.msr_no = msr_no;
  155. rv.reg.l = l;
  156. rv.reg.h = h;
  157. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  158. return err ? err : rv.err;
  159. }
  160. EXPORT_SYMBOL(wrmsr_safe_on_cpu);
  161. int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  162. {
  163. int err;
  164. struct msr_info rv;
  165. memset(&rv, 0, sizeof(rv));
  166. rv.msr_no = msr_no;
  167. rv.reg.q = q;
  168. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  169. return err ? err : rv.err;
  170. }
  171. EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
  172. int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  173. {
  174. u32 low, high;
  175. int err;
  176. err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
  177. *q = (u64)high << 32 | low;
  178. return err;
  179. }
  180. EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
  185. static void __rdmsr_safe_regs_on_cpu(void *info)
  186. {
  187. struct msr_regs_info *rv = info;
  188. rv->err = rdmsr_safe_regs(rv->regs);
  189. }
  190. static void __wrmsr_safe_regs_on_cpu(void *info)
  191. {
  192. struct msr_regs_info *rv = info;
  193. rv->err = wrmsr_safe_regs(rv->regs);
  194. }
  195. int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  196. {
  197. int err;
  198. struct msr_regs_info rv;
  199. rv.regs = regs;
  200. rv.err = -EIO;
  201. err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
  202. return err ? err : rv.err;
  203. }
  204. EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
  205. int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  206. {
  207. int err;
  208. struct msr_regs_info rv;
  209. rv.regs = regs;
  210. rv.err = -EIO;
  211. err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
  212. return err ? err : rv.err;
  213. }
  214. EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);