dt_cpu_ftrs.c

/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 * Licensed under GPLv2.
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mmu.h>
#include <asm/oprofile_impl.h>
#include <asm/prom.h>
#include <asm/setup.h>

/* Device-tree visible constants follow */
#define ISA_V2_07B 2070
#define ISA_V3_0B 3000

#define USABLE_PR (1U << 0)
#define USABLE_OS (1U << 1)
#define USABLE_HV (1U << 2)

#define HV_SUPPORT_HFSCR (1U << 0)

#define OS_SUPPORT_FSCR (1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE 0xffffffffU
#define OS_SUPPORT_NONE 0xffffffffU
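
/*
 * One of these is filled in for each subnode of the ibm,powerpc-cpu-features
 * device tree node, recording the properties used to decide how (and at what
 * privilege levels) the feature can be enabled.
 */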
struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;
	uint32_t hv_support;
	uint32_t os_support;
	uint32_t hfscr_bit_nr;
	uint32_t fscr_bit_nr;
	uint32_t hwcap_bit_nr;
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};

#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
			  PPC_FEATURE_ARCH_2_06 |\
			  PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \
			   PPC_FEATURE2_ISEL)

/*
 * Set up the base CPU
 */

extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);

static int hv_mode;

static struct {
	u64 lpcr;
	u64 lpcr_clear;
	u64 hfscr;
	u64 fscr;
} system_registers;

static void (*init_pmu_registers)(void);

static void __restore_cpu_cpufeatures(void)
{
	u64 lpcr;

	/*
	 * LPCR is restored by the power on engine already. It can be changed
	 * after early init e.g., by radix enable, and we have no unified API
	 * for saving and restoring such SPRs.
	 *
	 * This ->restore hook should really be removed from idle and register
	 * restore moved directly into the idle restore code, because this code
	 * doesn't know how idle is implemented or what it needs restored here.
	 *
	 * The best we can do to accommodate secondary boot and idle restore
	 * for now is "or" LPCR with existing.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= system_registers.lpcr;
	lpcr &= ~system_registers.lpcr_clear;
	mtspr(SPRN_LPCR, lpcr);

	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
		mtspr(SPRN_PCR, 0);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name = NULL,
	.cpu_features = CPU_FTRS_DT_CPU_BASE,
	.cpu_user_features = COMMON_USER_BASE,
	.cpu_user_features2 = COMMON_USER2_BASE,
	.mmu_features = 0,
	.icache_bsize = 32, /* minimum block size, fixed by */
	.dcache_bsize = 32, /* cache info init. */
	.num_pmcs = 0,
	.pmc_type = PPC_PMC_DEFAULT,
	.oprofile_cpu_type = NULL,
	.oprofile_type = PPC_OPROFILE_INVALID,
	.cpu_setup = NULL,
	.cpu_restore = __restore_cpu_cpufeatures,
	.machine_check_early = NULL,
	.platform = NULL,
};
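
/*
 * Install the minimal base cpu_spec and clear the facility status and control
 * registers, so that only facilities named by the device tree end up enabled.
 */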
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR. */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
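
/*
 * Try to enable a feature that has no entry in the match table, using only
 * the generic recipe the device tree node describes: set the HFSCR/FSCR bits
 * it names and, for user-visible features, advertise the given hwcap bit.
 */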
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}
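
/*
 * Default enable callback for known features: set the HFSCR/FSCR bits the
 * node names (if any) and advertise the hwcap bit to userspace.
 */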
static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}

static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISA 207 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;

	return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISAv3.0B */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_RADIX_MMU
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * Linux relies on FSCR[DSCR] being clear, so that we can take the
	 * facility unavailable interrupt and track the task's usage of DSCR.
	 * See facility_unavailable_exception().
	 * Clear the bit here so that feat_enable() doesn't set it.
	 */
	f->fscr_bit_nr = -1;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |= (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}
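
/*
 * PPC_BIT(60) is the performance monitor (PM) facility enable in the HFSCR;
 * set it before clearing the PMU SPRs to a known state. init_pmu_registers
 * is re-run from the cpu_restore hook for secondary boot and idle restore.
 */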
static void hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";

	return 1;
}

static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;
	cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";

	return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}
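
/*
 * Map each device tree feature node name to its enable callback and to the
 * CPU_FTR_* bits that are set when the callback succeeds. Names that are not
 * listed here fall back to feat_try_enable_unknown(), unless unknown features
 * have been disabled on the command line.
 */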
struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	{"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
};

static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;
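
/*
 * dt_cpu_ftrs=off disables device tree cpufeature parsing entirely;
 * dt_cpu_ftrs=known enables only the features listed in the match table.
 */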
static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= 3000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f)) {
				cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
				break;
			}

			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
		pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
			f->name);
		return false;
	}

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}

/*
 * Handle POWER9 broadcast tlbie invalidation issue using
 * cpu feature flag.
 */
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
	if (PVR_VER(pvr) == PVR_POWER9) {
		/*
		 * Set the tlbie feature flag for anything below
		 * Nimbus DD 2.3 and Cumulus DD 1.3
		 */
		if ((pvr & 0xe000) == 0) {
			/* Nimbus */
			if ((pvr & 0xfff) < 0x203)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else if ((pvr & 0xc000) == 0) {
			/* Cumulus */
			if ((pvr & 0xfff) < 0x103)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else {
			WARN_ONCE(1, "Unknown PVR");
			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		}

		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
	}
}

static __init void cpufeatures_cpu_quirks(void)
{
	unsigned long version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffefff) == 0x004e0200)
		; /* DD2.0 has no feature flag */
	else if ((version & 0xffffefff) == 0x004e0201)
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	else if ((version & 0xffffefff) == 0x004e0202) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else if ((version & 0xffff0000) == 0x004e0000)
		/* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;

	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
	}

	update_tlbie_feature_flag(version);
	/*
	 * PKEY was not in the initial base or feature node
	 * specification, but it should become optional in the next
	 * cpu feature version sequence.
	 */
	cur_cpu_spec->cpu_features |= CPU_FTR_PKEY;
}
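
/*
 * Apply PVR-based quirks and snapshot the final LPCR/HFSCR/FSCR values so
 * that secondaries and the idle restore path (__restore_cpu_cpufeatures)
 * come up with the same register state as the boot CPU.
 */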
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}
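
/*
 * dt_cpu_ftrs_init() runs before the early_param handlers, so check the
 * flattened tree's /chosen bootargs for dt_cpu_ftrs=off by hand here.
 */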
static int __init disabled_on_cmdline(void)
{
	unsigned long root, chosen;
	const char *p;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return false;

	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
	if (!p)
		return false;

	if (strstr(p, "dt_cpu_ftrs=off"))
		return true;

	return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
{
	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
	    && of_get_flat_dt_prop(node, "isa", NULL))
		return 1;

	return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}

static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;
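
/*
 * Parse one ibm,powerpc-cpu-features subnode into dt_cpu_features[i],
 * validate that its hv/os/hwcap properties are consistent with the
 * usable-privilege mask, and enable it immediately if it has no
 * "dependencies" property (dependent features are handled in a second pass).
 */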
static int __init process_cpufeatures_node(unsigned long node,
					   const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];
	memset(f, 0, sizeof(struct dt_cpu_feature));

	f->node = node;
	f->name = uname;

	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}
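
/*
 * Second pass: enable a feature once all of the features named by its
 * "dependencies" phandle list have been resolved. If any dependency ends up
 * disabled, this feature is disabled as well. Features already resolved in
 * the first pass return immediately.
 */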
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property", f->name);
		return;
	}

	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
					    const char *uname,
					    void *data)
{
	int *count = data;

	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
					     const char *uname,
					     void *data)
{
	int *count = data;

	(*count)++;

	return 0;
}
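
/*
 * Walk the ibm,powerpc-cpu-features node: count the feature subnodes,
 * allocate a temporary dt_cpu_features array from memblock, parse and enable
 * the features (independent ones first, then those with dependencies), then
 * record the CPU display name and free the array.
 */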
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
				 &nr_dt_cpu_features);
	dt_cpu_features = __va(
		memblock_alloc(sizeof(struct dt_cpu_feature)*
				nr_dt_cpu_features, PAGE_SIZE));

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(__pa(dt_cpu_features),
			sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);

	return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}