sysfs.c
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>

#include "cacheinfo.h"
#include "setup.h"

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif

static DEFINE_PER_CPU(struct cpu, cpu_devices);

#ifdef CONFIG_PPC64
/*
 * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
 * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
 * 2014:
 *
 * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
 * up the kernel code."
 *
 * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
 * code should be removed.
 */
static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
		     current->comm, current->pid);
	return count;
}

static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
		     current->comm, current->pid);
	return sprintf(buf, "100\n");
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);

static int __init setup_smt_snooze_delay(char *str)
{
	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	pr_warn("smt-snooze-delay command line option has no effect\n");
	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_BIT		63

static u64 pw20_wt;
static u64 altivec_idle_wt;

static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	if (ns >= 10000)
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}
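
/*
 * Note on the conversion above: get_idle_ticks_bit() turns a wait time in
 * nanoseconds into the index of the highest set bit of the equivalent
 * time-base tick count (the ns >= 10000 path rounds to microseconds before
 * multiplying so the intermediate product cannot overflow 64 bits).  As a
 * worked example, with the 41 MHz time base assumed in the wait-time table
 * further down (41 ticks per usec), 1000 ns is about 41 ticks,
 * ilog2(41) == 5, and the store handlers then program MAX_BIT - 5 == 58,
 * i.e. TB[58], which matches the 781~1560 ns row of that table.
 */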

static void do_show_pwrmgtcr0(void *val)
{
	u32 *value = val;

	*value = mfspr(SPRN_PWRMGTCR0);
}

static ssize_t show_pw20_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_PW20_WAIT;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_pw20_state(void *val)
{
	u32 *value = val;
	u32 pw20_state;

	pw20_state = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		pw20_state |= PWRMGTCR0_PW20_WAIT;
	else
		pw20_state &= ~PWRMGTCR0_PW20_WAIT;

	mtspr(SPRN_PWRMGTCR0, pw20_state);
}

static ssize_t store_pw20_state(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);

	return count;
}

static ssize_t show_pw20_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
					PWRMGTCR0_PW20_ENT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert ms to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}

static void set_pw20_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 pw20_idle;

	pw20_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic PW20 Core Idle Count */
	/* clear count */
	pw20_idle &= ~PWRMGTCR0_PW20_ENT;

	/* set count */
	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, pw20_idle);
}

static ssize_t store_pw20_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	pw20_wt = value;

	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
				&entry_bit, 1);

	return count;
}

static ssize_t show_altivec_idle(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_AV_IDLE_PD_EN;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_altivec_idle(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
	else
		altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);

	return count;
}

static ssize_t show_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
					PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert ms to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}

static void set_altivec_idle_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic AltiVec Idle Count */
	/* clear count */
	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;

	/* set count */
	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	altivec_idle_wt = value;

	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
				&entry_bit, 1);

	return count;
}

/*
 * Enable/Disable interface:
 * 0, disable. 1, enable.
 */
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);

/*
 * Set wait time interface (nanoseconds).
 * Example, based on a TB frequency of 41 MHz:
 * 1~48(ns): TB[63]
 * 49~97(ns): TB[62]
 * 98~195(ns): TB[61]
 * 196~390(ns): TB[60]
 * 391~780(ns): TB[59]
 * 781~1560(ns): TB[58]
 * ...
 */
static DEVICE_ATTR(pw20_wait_time, 0600,
			show_pw20_wait_time,
			store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
			show_altivec_idle_wait_time,
			store_altivec_idle_wait_time);
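
/*
 * These per-CPU attributes are created in register_cpu_online() below for
 * e6500 parts, so they would typically appear as, for example,
 * /sys/devices/system/cpu/cpu0/pw20_state and
 * /sys/devices/system/cpu/cpu0/pw20_wait_time.  Writing "1" to pw20_state
 * sets PWRMGTCR0[PW20_WAIT] on that CPU, and writing a nanosecond value to
 * pw20_wait_time reprograms the entry count via get_idle_ticks_bit().
 */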
#endif

/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__this_cpu_read(pmcs_enabled))
		return;

	__this_cpu_write(pmcs_enabled, 1);

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);

#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS); \
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val); \
}

#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
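
/*
 * For reference, SYSFS_SPRSETUP(purr, SPRN_PURR) below expands to roughly:
 *
 *	static void read_purr(void *val)  { *(unsigned long *)val = mfspr(SPRN_PURR); }
 *	static void write_purr(void *val) { mtspr(SPRN_PURR, *(unsigned long *)val); }
 *	static ssize_t show_purr(...)  { ... smp_call_function_single(cpu->dev.id, read_purr, &val, 1); ... }
 *	static ssize_t store_purr(...) { ... smp_call_function_single(cpu->dev.id, write_purr, &val, 1); ... }
 *
 * so every show/store performs the SPR access on the CPU that owns the
 * sysfs file.  The SYSFS_PMCSETUP() variant additionally calls
 * ppc_enable_pmcs() before the mtspr.
 */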

/* Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#if defined(CONFIG_PPC64)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_PA6T	1
#elif defined(CONFIG_6xx)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#define HAS_PPC_PMC_G4		1
#endif

#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef CONFIG_PPC64
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);

SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
SYSFS_SPRSETUP(tscr, SPRN_TSCR);

/*
 * Let's only enable read for phyp resources, and enable
 * write when needed with a separate function.
 * Let's be conservative and default to pseries.
 */
static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);

/*
 * This is the system wide DSCR register default value. Any
 * change to this default value through the sysfs interface
 * will update all per cpu DSCR default values across the
 * system stored in their respective PACA structures.
 */
static unsigned long dscr_default;

/**
 * read_dscr() - Fetch the cpu specific DSCR default
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void read_dscr(void *val)
{
	*(unsigned long *)val = get_paca()->dscr_default;
}

/**
 * write_dscr() - Update the cpu specific DSCR default
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void write_dscr(void *val)
{
	get_paca()->dscr_default = *(unsigned long *)val;
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = *(unsigned long *)val;
		mtspr(SPRN_DSCR, *(unsigned long *)val);
	}
}

SYSFS_SPRSETUP_SHOW_STORE(dscr);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);

static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}

/**
 * show_dscr_default() - Fetch the system wide DSCR default
 * @dev:	Device structure
 * @attr:	Device attribute structure
 * @buf:	Interface buffer
 *
 * This function returns the system wide DSCR default value.
 */
static ssize_t show_dscr_default(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}

/**
 * store_dscr_default() - Update the system wide DSCR default
 * @dev:	Device structure
 * @attr:	Device attribute structure
 * @buf:	Interface buffer
 * @count:	Size of the update
 *
 * This function updates the system wide DSCR default value.
 */
static ssize_t __used store_dscr_default(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	unsigned long val;
	int ret = 0;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;
	dscr_default = val;

	on_each_cpu(write_dscr, &val, 1);

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		show_dscr_default, store_dscr_default);
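
/*
 * dscr_default is attached to the cpu subsystem root by
 * sysfs_create_dscr_default() below, so it would normally show up as
 * /sys/devices/system/cpu/dscr_default.  Writing a hex value there updates
 * dscr_default and pushes it into every CPU's PACA (and live DSCR, unless
 * the thread has set its own value) via on_each_cpu(write_dscr, &val, 1).
 */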

static void sysfs_create_dscr_default(void)
{
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		int err = 0;
		int cpu;

		dscr_default = spr_default_dscr;
		for_each_possible_cpu(cpu)
			paca_ptrs[cpu]->dscr_default = dscr_default;

		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
	}
}
#endif /* CONFIG_PPC64 */

#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);

#ifdef CONFIG_DEBUG_KERNEL
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* CONFIG_DEBUG_KERNEL */
#endif /* HAS_PPC_PMC_PA6T */

#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef CONFIG_PPC64
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};

#ifdef HAS_PPC_PMC_PA6T
static struct device_attribute pa6t_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#ifdef CONFIG_DEBUG_KERNEL
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* CONFIG_DEBUG_KERNEL */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */

static int register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* For cpus present at boot a reference was already grabbed in register_cpu() */
	if (!s->of_node)
		s->of_node = of_get_cpu_node(cpu, NULL);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR)) {
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
	of_node_put(s->of_node);
	s->of_node = NULL;
	return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
#define unregister_cpu_online NULL
#endif

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static DEFINE_MUTEX(cpu_mutex);

int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);

int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);

void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);

void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);

/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);

void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}

#endif

/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);

static int __init topology_init(void)
{
	int cpu, r;

	register_nodes();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}
	}
	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
			      register_cpu_online, unregister_cpu_online);
	WARN_ON(r < 0);
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	return 0;
}
subsys_initcall(topology_init);