  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/export.h>
  3. #include <linux/preempt.h>
  4. #include <linux/smp.h>
  5. #include <linux/completion.h>
  6. #include <asm/msr.h>
  7. static void __rdmsr_on_cpu(void *info)
  8. {
  9. struct msr_info *rv = info;
  10. struct msr *reg;
  11. int this_cpu = raw_smp_processor_id();
  12. if (rv->msrs)
  13. reg = per_cpu_ptr(rv->msrs, this_cpu);
  14. else
  15. reg = &rv->reg;
  16. rdmsr(rv->msr_no, reg->l, reg->h);
  17. }
  18. static void __wrmsr_on_cpu(void *info)
  19. {
  20. struct msr_info *rv = info;
  21. struct msr *reg;
  22. int this_cpu = raw_smp_processor_id();
  23. if (rv->msrs)
  24. reg = per_cpu_ptr(rv->msrs, this_cpu);
  25. else
  26. reg = &rv->reg;
  27. wrmsr(rv->msr_no, reg->l, reg->h);
  28. }
  29. int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
  30. {
  31. int err;
  32. struct msr_info rv;
  33. memset(&rv, 0, sizeof(rv));
  34. rv.msr_no = msr_no;
  35. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  36. *l = rv.reg.l;
  37. *h = rv.reg.h;
  38. return err;
  39. }
  40. EXPORT_SYMBOL(rdmsr_on_cpu);
  41. int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  42. {
  43. int err;
  44. struct msr_info rv;
  45. memset(&rv, 0, sizeof(rv));
  46. rv.msr_no = msr_no;
  47. err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
  48. *q = rv.reg.q;
  49. return err;
  50. }
  51. EXPORT_SYMBOL(rdmsrl_on_cpu);
  52. int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  53. {
  54. int err;
  55. struct msr_info rv;
  56. memset(&rv, 0, sizeof(rv));
  57. rv.msr_no = msr_no;
  58. rv.reg.l = l;
  59. rv.reg.h = h;
  60. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  61. return err;
  62. }
  63. EXPORT_SYMBOL(wrmsr_on_cpu);
  64. int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  65. {
  66. int err;
  67. struct msr_info rv;
  68. memset(&rv, 0, sizeof(rv));
  69. rv.msr_no = msr_no;
  70. rv.reg.q = q;
  71. err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
  72. return err;
  73. }
  74. EXPORT_SYMBOL(wrmsrl_on_cpu);
/*
 * Run @msr_func on every CPU in @mask.
 *
 * smp_call_function_many() does not run the function on the calling
 * CPU, so if the current CPU is part of @mask, @msr_func is invoked
 * directly here first.  get_cpu()/put_cpu() disable preemption so we
 * stay on the same CPU for both the local call and the cross-call.
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	/* msrs != NULL makes the handlers use the per-CPU result slots */
	rv.msrs	  = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	/* wait=1: don't return until every remote CPU has finished */
	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
  90. /* rdmsr on a bunch of CPUs
  91. *
  92. * @mask: which CPUs
  93. * @msr_no: which MSR
  94. * @msrs: array of MSR values
  95. *
  96. */
  97. void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
  98. {
  99. __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
  100. }
  101. EXPORT_SYMBOL(rdmsr_on_cpus);
  102. /*
  103. * wrmsr on a bunch of CPUs
  104. *
  105. * @mask: which CPUs
  106. * @msr_no: which MSR
  107. * @msrs: array of MSR values
  108. *
  109. */
  110. void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
  111. {
  112. __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
  113. }
  114. EXPORT_SYMBOL(wrmsr_on_cpus);
/*
 * Request/result carrier for the asynchronous "safe" rdmsr path:
 * the IPI handler fills in @msr and then signals @done, letting the
 * requesting CPU sleep until the result is available.
 */
struct msr_info_completion {
	struct msr_info	msr;
	struct completion done;
};
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */

/*
 * IPI handler for rdmsr_safe_on_cpu(): do the (fault-tolerant) read,
 * store the error code, then wake the waiter.  complete() must come
 * last — the waiter's stack frame (which holds *rv) may be reused as
 * soon as the completion fires.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	complete(&rv->done);
}
/*
 * IPI handler for the "safe" write paths: perform the fault-tolerant
 * MSR write and record the result in rv->err for the caller to check.
 */
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
/*
 * rdmsr_safe_on_cpu - read a possibly non-existent MSR on a given CPU
 * @cpu:    CPU to read on
 * @msr_no: MSR to read
 * @l, @h:  out: low/high 32 bits of the value (zero on failure)
 *
 * Uses smp_call_function_single_async() with an on-stack csd plus a
 * completion instead of the synchronous call.  Both @csd and @rv live
 * on this stack, which is safe because we wait_for_completion() —
 * i.e. the remote handler has finished with them — before returning.
 *
 * Returns 0 on success, the async-call error, or the rdmsr_safe()
 * error reported by the target CPU.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd = {
		.func	= __rdmsr_safe_on_cpu,
		.info	= &rv,
	};
	int err;

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	/* On failure these are still the zeroes from memset() above */
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
  153. int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
  154. {
  155. int err;
  156. struct msr_info rv;
  157. memset(&rv, 0, sizeof(rv));
  158. rv.msr_no = msr_no;
  159. rv.reg.l = l;
  160. rv.reg.h = h;
  161. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  162. return err ? err : rv.err;
  163. }
  164. EXPORT_SYMBOL(wrmsr_safe_on_cpu);
  165. int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
  166. {
  167. int err;
  168. struct msr_info rv;
  169. memset(&rv, 0, sizeof(rv));
  170. rv.msr_no = msr_no;
  171. rv.reg.q = q;
  172. err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
  173. return err ? err : rv.err;
  174. }
  175. EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
  176. int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
  177. {
  178. u32 low, high;
  179. int err;
  180. err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
  181. *q = (u64)high << 32 | low;
  182. return err;
  183. }
  184. EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
/*
 * These variants are significantly slower, but allows control over
 * the entire 32-bit GPR set.
 */

/* IPI handler: fault-tolerant MSR read driven by a full register set. */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}
/* IPI handler: fault-tolerant MSR write driven by a full register set. */
static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}
  199. int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  200. {
  201. int err;
  202. struct msr_regs_info rv;
  203. rv.regs = regs;
  204. rv.err = -EIO;
  205. err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
  206. return err ? err : rv.err;
  207. }
  208. EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
  209. int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
  210. {
  211. int err;
  212. struct msr_regs_info rv;
  213. rv.regs = regs;
  214. rv.err = -EIO;
  215. err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
  216. return err ? err : rv.err;
  217. }
  218. EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);