// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017, Nicholas Piggin, IBM Corporation
 */

#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>

/* Device-tree visible constants follow */
#define ISA_V3_0B	3000
#define ISA_V3_1	3100

#define USABLE_PR		(1U << 0)
#define USABLE_OS		(1U << 1)
#define USABLE_HV		(1U << 2)

#define HV_SUPPORT_HFSCR	(1U << 0)

#define OS_SUPPORT_FSCR		(1U << 0)

/* For parsing, we define all bits set as "NONE" case */
#define HV_SUPPORT_NONE		0xffffffffU
#define OS_SUPPORT_NONE		0xffffffffU

struct dt_cpu_feature {
	const char *name;
	uint32_t isa;
	uint32_t usable_privilege;
	uint32_t hv_support;
	uint32_t os_support;
	uint32_t hfscr_bit_nr;
	uint32_t fscr_bit_nr;
	uint32_t hwcap_bit_nr;
	/* fdt parsing */
	unsigned long node;
	int enabled;
	int disabled;
};
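/*
 * For orientation, a cpu-features subnode roughly as this parser expects it.
 * This is an illustrative sketch only: the property names follow
 * process_cpufeatures_node() below, while the node name and values are
 * hypothetical.
 *
 *	example-feature {
 *		isa = <3000>;
 *		usable-privilege = <0x7>;	(PR | OS | HV)
 *		hwcap-bit-nr = <63>;
 *	};
 *
 * Absent hv-support/os-support properties are treated as the "NONE" case
 * defined above.
 */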
#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)

#define COMMON_USER_BASE	(PPC_FEATURE_32 | PPC_FEATURE_64 | \
				 PPC_FEATURE_ARCH_2_06 | \
				 PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER2_BASE	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_ISEL)

/*
 * Set up the base CPU
 */

static int hv_mode;

static struct {
	u64 lpcr;
	u64 hfscr;
	u64 fscr;
	u64 pcr;
} system_registers;

static void (*init_pmu_registers)(void);

static void __restore_cpu_cpufeatures(void)
{
	mtspr(SPRN_LPCR, system_registers.lpcr);
	if (hv_mode) {
		mtspr(SPRN_LPID, 0);
		mtspr(SPRN_AMOR, ~0);
		mtspr(SPRN_HFSCR, system_registers.hfscr);
		mtspr(SPRN_PCR, system_registers.pcr);
	}
	mtspr(SPRN_FSCR, system_registers.fscr);

	if (init_pmu_registers)
		init_pmu_registers();
}

static char dt_cpu_name[64];

static struct cpu_spec __initdata base_cpu_spec = {
	.cpu_name		= NULL,
	.cpu_features		= CPU_FTRS_DT_CPU_BASE,
	.cpu_user_features	= COMMON_USER_BASE,
	.cpu_user_features2	= COMMON_USER2_BASE,
	.mmu_features		= 0,
	.icache_bsize		= 32, /* minimum block size, fixed by */
	.dcache_bsize		= 32, /* cache info init. */
	.num_pmcs		= 0,
	.pmc_type		= PPC_PMC_DEFAULT,
	.cpu_setup		= NULL,
	.cpu_restore		= __restore_cpu_cpufeatures,
	.machine_check_early	= NULL,
	.platform		= NULL,
};

static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR. */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);
	mtspr(SPRN_PCR, PCR_MASK);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
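/*
 * Unknown features are enabled with a generic recipe: set the advertised
 * HFSCR/FSCR facility bit (per the hv-support/os-support encodings above)
 * and, if a hwcap bit was given, expose it to userspace. Note the hwcap
 * numbering used below: word 0 maps to cpu_user_features (AT_HWCAP) and
 * word 1 to cpu_user_features2 (AT_HWCAP2).
 */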
static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
{
	if (f->hv_support == HV_SUPPORT_NONE) {
	} else if (f->hv_support & HV_SUPPORT_HFSCR) {
		u64 hfscr = mfspr(SPRN_HFSCR);
		hfscr |= 1UL << f->hfscr_bit_nr;
		mtspr(SPRN_HFSCR, hfscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if (f->os_support == OS_SUPPORT_NONE) {
	} else if (f->os_support & OS_SUPPORT_FSCR) {
		u64 fscr = mfspr(SPRN_FSCR);
		fscr |= 1UL << f->fscr_bit_nr;
		mtspr(SPRN_FSCR, fscr);
	} else {
		/* Does not have a known recipe */
		return 0;
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_enable(struct dt_cpu_feature *f)
{
	if (f->hv_support != HV_SUPPORT_NONE) {
		if (f->hfscr_bit_nr != -1) {
			u64 hfscr = mfspr(SPRN_HFSCR);
			hfscr |= 1UL << f->hfscr_bit_nr;
			mtspr(SPRN_HFSCR, hfscr);
		}
	}

	if (f->os_support != OS_SUPPORT_NONE) {
		if (f->fscr_bit_nr != -1) {
			u64 fscr = mfspr(SPRN_FSCR);
			fscr |= 1UL << f->fscr_bit_nr;
			mtspr(SPRN_FSCR, fscr);
		}
	}

	if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
		uint32_t word = f->hwcap_bit_nr / 32;
		uint32_t bit = f->hwcap_bit_nr % 32;

		if (word == 0)
			cur_cpu_spec->cpu_user_features |= 1U << bit;
		else if (word == 1)
			cur_cpu_spec->cpu_user_features2 |= 1U << bit;
		else
			pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
	}

	return 1;
}

static int __init feat_disable(struct dt_cpu_feature *f)
{
	return 0;
}
static int __init feat_enable_hv(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!hv_mode) {
		pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
		return 0;
	}

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_AMOR, ~0);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_LPES0; /* HV external interrupts */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;

	return 1;
}

static int __init feat_enable_le(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
	return 1;
}

static int __init feat_enable_smt(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
	return 1;
}

static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISA 207 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* Set PECE wakeup modes for ISAv3.0B */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECE0;
	lpcr |= LPCR_PECE1;
	lpcr |= LPCR_PECE2;
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_ISL;

	/* VRMASD */
	lpcr |= LPCR_VPM0;
	lpcr &= ~LPCR_VPM1;
	lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	u64 lpcr;

	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
		return 0;

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
	mtspr(SPRN_LPCR, lpcr);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}

static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
	if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
		return 0;

	cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
	cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * Linux relies on FSCR[DSCR] being clear, so that we can take the
	 * facility unavailable interrupt and track the task's usage of DSCR.
	 * See facility_unavailable_exception().
	 * Clear the bit here so that feat_enable() doesn't set it.
	 */
	f->fscr_bit_nr = -1;

	feat_enable(f);

	lpcr = mfspr(SPRN_LPCR);
	lpcr &= ~LPCR_DPFD;
	lpcr |= (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static void __init hfscr_pmu_enable(void)
{
	u64 hfscr = mfspr(SPRN_HFSCR);
	hfscr |= PPC_BIT(60);
	mtspr(SPRN_HFSCR, hfscr);
}

static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}

static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power8";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;

	return 1;
}

static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power8();
	init_pmu_registers = init_pmu_power8;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
	if (pvr_version_is(PVR_POWER8E))
		cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}

static void init_pmu_power9(void)
{
	if (hv_mode)
		mtspr(SPRN_MMCRC, 0);

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, MMCR0_FC);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
}

static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power9";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;

	return 1;
}

static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power9();
	init_pmu_registers = init_pmu_power9;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}

static void init_pmu_power10(void)
{
	init_pmu_power9();

	mtspr(SPRN_MMCR3, 0);
	mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}

static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
{
	hfscr_pmu_enable();

	init_pmu_power10();
	init_pmu_registers = init_pmu_power10;

	cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;

	cur_cpu_spec->num_pmcs = 6;
	cur_cpu_spec->pmc_type = PPC_PMC_IBM;

	return 1;
}
static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power10";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;

	return 1;
}

static int __init feat_enable_mce_power11(struct dt_cpu_feature *f)
{
	cur_cpu_spec->platform = "power11";
	cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;

	return 1;
}

static int __init feat_enable_tm(struct dt_cpu_feature *f)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	feat_enable(f);
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
	return 1;
#endif
	return 0;
}

static int __init feat_enable_fp(struct dt_cpu_feature *f)
{
	feat_enable(f);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;

	return 1;
}

static int __init feat_enable_vector(struct dt_cpu_feature *f)
{
#ifdef CONFIG_ALTIVEC
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
	cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_vsx(struct dt_cpu_feature *f)
{
#ifdef CONFIG_VSX
	feat_enable(f);
	cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;

	return 1;
#endif
	return 0;
}

static int __init feat_enable_purr(struct dt_cpu_feature *f)
{
	cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;

	return 1;
}

static int __init feat_enable_ebb(struct dt_cpu_feature *f)
{
	/*
	 * PPC_FEATURE2_EBB is enabled in PMU init code because it has
	 * historically been related to the PMU facility. This may have
	 * to be decoupled if EBB becomes more generic. For now, follow
	 * existing convention.
	 */
	f->hwcap_bit_nr = -1;
	feat_enable(f);

	return 1;
}

static int __init feat_enable_dbell(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/* P9 has an HFSCR for privileged state */
	feat_enable(f);

	cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;

	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_hvi(struct dt_cpu_feature *f)
{
	u64 lpcr;

	/*
	 * POWER9 XIVE interrupts including in OPAL XICS compatibility
	 * are always delivered as hypervisor virtualization interrupts (HVI)
	 * rather than EE.
	 *
	 * However LPES0 is not set here, in the chance that an EE does get
	 * delivered to the host somehow, the EE handler would not expect it
	 * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
	 * happen if there is a bug in interrupt controller code, or IC is
	 * misconfigured in systemsim.
	 */
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= LPCR_HVICE;	/* enable hvi interrupts */
	lpcr |= LPCR_HEIC;	/* disable ee interrupts when MSR_HV */
	lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
	mtspr(SPRN_LPCR, lpcr);

	return 1;
}

static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
{
	cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;

	return 1;
}

static int __init feat_enable_mma(struct dt_cpu_feature *f)
{
	u64 pcr;

	feat_enable(f);
	pcr = mfspr(SPRN_PCR);
	pcr &= ~PCR_MMA_DIS;
	mtspr(SPRN_PCR, pcr);

	return 1;
}
struct dt_cpu_feature_match {
	const char *name;
	int (*enable)(struct dt_cpu_feature *f);
	u64 cpu_ftr_bit_mask;
};

static struct dt_cpu_feature_match __initdata
		dt_cpu_feature_match_table[] = {
	{"hypervisor", feat_enable_hv, 0},
	{"big-endian", feat_enable, 0},
	{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
	{"smt", feat_enable_smt, 0},
	{"interrupt-facilities", feat_enable, 0},
	{"system-call-vectored", feat_enable, 0},
	{"timer-facilities", feat_enable, 0},
	{"timer-facilities-v3", feat_enable, 0},
	{"debug-facilities", feat_enable, 0},
	{"come-from-address-register", feat_enable, CPU_FTR_CFAR},
	{"branch-tracing", feat_enable, 0},
	{"floating-point", feat_enable_fp, 0},
	{"vector", feat_enable_vector, 0},
	{"vector-scalar", feat_enable_vsx, 0},
	{"vector-scalar-v3", feat_enable, 0},
	{"decimal-floating-point", feat_enable, 0},
	{"decimal-integer", feat_enable, 0},
	{"quadword-load-store", feat_enable, 0},
	{"vector-crypto", feat_enable, 0},
	{"mmu-hash", feat_enable_mmu_hash, 0},
	{"mmu-radix", feat_enable_mmu_radix, 0},
	{"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
	{"virtual-page-class-key-protection", feat_enable, 0},
	{"transactional-memory", feat_enable_tm, CPU_FTR_TM},
	{"transactional-memory-v3", feat_enable_tm, 0},
	{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
	{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
	{"idle-nap", feat_enable_idle_nap, 0},
	/* alignment-interrupt-dsisr ignored */
	{"idle-stop", feat_enable_idle_stop, 0},
	{"machine-check-power8", feat_enable_mce_power8, 0},
	{"performance-monitor-power8", feat_enable_pmu_power8, 0},
	{"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
	{"event-based-branch", feat_enable_ebb, 0},
	{"target-address-register", feat_enable, 0},
	{"branch-history-rolling-buffer", feat_enable, 0},
	{"control-register", feat_enable, CPU_FTR_CTRL},
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
	{"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
	{"wait", feat_enable, 0},
	{"atomic-memory-operations", feat_enable, 0},
	{"branch-v3", feat_enable, 0},
	{"copy-paste", feat_enable, 0},
	{"decimal-floating-point-v3", feat_enable, 0},
	{"decimal-integer-v3", feat_enable, 0},
	{"fixed-point-v3", feat_enable, 0},
	{"floating-point-v3", feat_enable, 0},
	{"group-start-register", feat_enable, 0},
	{"pc-relative-addressing", feat_enable, 0},
	{"machine-check-power9", feat_enable_mce_power9, 0},
	{"machine-check-power10", feat_enable_mce_power10, 0},
	{"machine-check-power11", feat_enable_mce_power11, 0},
	{"performance-monitor-power9", feat_enable_pmu_power9, 0},
	{"performance-monitor-power10", feat_enable_pmu_power10, 0},
	{"performance-monitor-power11", feat_enable_pmu_power10, 0},
	{"event-based-branch-v3", feat_enable, 0},
	{"random-number-generator", feat_enable, 0},
	{"system-call-vectored", feat_disable, 0},
	{"trace-interrupt-v3", feat_enable, 0},
	{"vector-v3", feat_enable, 0},
	{"vector-binary128", feat_enable, 0},
	{"vector-binary16", feat_enable, 0},
	{"wait-v3", feat_enable, 0},
	{"prefix-instructions", feat_enable, 0},
	{"matrix-multiply-assist", feat_enable_mma, 0},
	{"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1},
};
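/*
 * The table above is matched by exact name in cpufeatures_process_feature():
 * the first entry whose name matches a device-tree node wins (the scan loop
 * breaks after calling its enable() hook), and cpu_ftr_bit_mask is OR'd into
 * cpu_features only when that hook reports success.
 */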
static bool __initdata using_dt_cpu_ftrs;
static bool __initdata enable_unknown = true;

static int __init dt_cpu_ftrs_parse(char *str)
{
	if (!str)
		return 0;

	if (!strcmp(str, "off"))
		using_dt_cpu_ftrs = false;
	else if (!strcmp(str, "known"))
		enable_unknown = false;
	else
		return 1;

	return 0;
}
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);

static void __init cpufeatures_setup_start(u32 isa)
{
	pr_info("setup for ISA %d\n", isa);

	if (isa >= ISA_V3_0B) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
	}

	if (isa >= ISA_V3_1) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
	}
}

static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
{
	const struct dt_cpu_feature_match *m;
	bool known = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
		m = &dt_cpu_feature_match_table[i];
		if (!strcmp(f->name, m->name)) {
			known = true;
			if (m->enable(f)) {
				cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
				break;
			}

			pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
				f->name);
			return false;
		}
	}

	if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
		pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
			f->name);
		return false;
	}

	if (known)
		pr_debug("enabling: %s\n", f->name);
	else
		pr_debug("enabling: %s (unknown)\n", f->name);

	return true;
}
/*
 * Handle POWER9 broadcast tlbie invalidation issue using
 * cpu feature flag.
 */
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
	if (PVR_VER(pvr) == PVR_POWER9) {
		/*
		 * Set the tlbie feature flag for anything below
		 * Nimbus DD 2.3 and Cumulus DD 1.3
		 */
		if ((pvr & 0xe000) == 0) {
			/* Nimbus */
			if ((pvr & 0xfff) < 0x203)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else if ((pvr & 0xc000) == 0) {
			/* Cumulus */
			if ((pvr & 0xfff) < 0x103)
				cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		} else {
			WARN_ONCE(1, "Unknown PVR");
			cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
		}

		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
	}
}

static __init void cpufeatures_cpu_quirks(void)
{
	unsigned long version = mfspr(SPRN_PVR);

	/*
	 * Not all quirks can be derived from the cpufeatures device tree.
	 */
	if ((version & 0xffffefff) == 0x004e0200) {
		/* DD2.0 has no feature flag */
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0201) {
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0202) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
	} else if ((version & 0xffffefff) == 0x004e0203) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	} else if ((version & 0xffff0000) == 0x004e0000) {
		/* DD2.1 and up have DD2_1 */
		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
	}

	if ((version & 0xffff0000) == 0x004e0000) {
		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
	}

	update_tlbie_feature_flag(version);
}
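/*
 * Once every feature has been processed, the function below snapshots LPCR,
 * HFSCR, FSCR and PCR into system_registers so that
 * __restore_cpu_cpufeatures() (wired up as the cpu_restore hook in
 * base_cpu_spec) can re-apply the same state when other CPUs come up, as the
 * LPCR comment in cpufeatures_setup_cpu() also notes.
 */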
static void __init cpufeatures_setup_finished(void)
{
	cpufeatures_cpu_quirks();

	if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
		pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
	}

	/* Make sure powerpc_base_platform is non-NULL */
	powerpc_base_platform = cur_cpu_spec->platform;

	system_registers.lpcr = mfspr(SPRN_LPCR);
	system_registers.hfscr = mfspr(SPRN_HFSCR);
	system_registers.fscr = mfspr(SPRN_FSCR);
	system_registers.pcr = mfspr(SPRN_PCR);

	pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
}

static int __init disabled_on_cmdline(void)
{
	unsigned long root, chosen;
	const char *p;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return false;

	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
	if (!p)
		return false;

	if (strstr(p, "dt_cpu_ftrs=off"))
		return true;

	return false;
}

static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
{
	if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
	    && of_get_flat_dt_prop(node, "isa", NULL))
		return 1;

	return 0;
}

bool __init dt_cpu_ftrs_in_use(void)
{
	return using_dt_cpu_ftrs;
}

bool __init dt_cpu_ftrs_init(void *fdt)
{
	using_dt_cpu_ftrs = false;

	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt, __pa(fdt)))
		return false;

	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;

	if (disabled_on_cmdline())
		return false;

	cpufeatures_setup_cpu();

	using_dt_cpu_ftrs = true;
	return true;
}
static int nr_dt_cpu_features;
static struct dt_cpu_feature *dt_cpu_features;

static int __init process_cpufeatures_node(unsigned long node,
					   const char *uname, int i)
{
	const __be32 *prop;
	struct dt_cpu_feature *f;
	int len;

	f = &dt_cpu_features[i];

	f->node = node;
	f->name = uname;

	prop = of_get_flat_dt_prop(node, "isa", &len);
	if (!prop) {
		pr_warn("%s: missing isa property\n", uname);
		return 0;
	}
	f->isa = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
	if (!prop) {
		pr_warn("%s: missing usable-privilege property", uname);
		return 0;
	}
	f->usable_privilege = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "hv-support", &len);
	if (prop)
		f->hv_support = be32_to_cpup(prop);
	else
		f->hv_support = HV_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "os-support", &len);
	if (prop)
		f->os_support = be32_to_cpup(prop);
	else
		f->os_support = OS_SUPPORT_NONE;

	prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
	if (prop)
		f->hfscr_bit_nr = be32_to_cpup(prop);
	else
		f->hfscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
	if (prop)
		f->fscr_bit_nr = be32_to_cpup(prop);
	else
		f->fscr_bit_nr = -1;

	prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
	if (prop)
		f->hwcap_bit_nr = be32_to_cpup(prop);
	else
		f->hwcap_bit_nr = -1;

	if (f->usable_privilege & USABLE_HV) {
		if (!(mfmsr() & MSR_HV)) {
			pr_warn("%s: HV feature passed to guest\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
			return 0;
		}

		if (f->hv_support == HV_SUPPORT_HFSCR) {
			if (f->hfscr_bit_nr == -1) {
				pr_warn("%s: missing hfscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
			pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (f->usable_privilege & USABLE_OS) {
		if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted fscr_bit_nr\n", uname);
			return 0;
		}

		if (f->os_support == OS_SUPPORT_FSCR) {
			if (f->fscr_bit_nr == -1) {
				pr_warn("%s: missing fscr_bit_nr\n", uname);
				return 0;
			}
		}
	} else {
		if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
			pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
			return 0;
		}
	}

	if (!(f->usable_privilege & USABLE_PR)) {
		if (f->hwcap_bit_nr != -1) {
			pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
			return 0;
		}
	}

	/* Do all the independent features in the first pass */
	if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
		if (cpufeatures_process_feature(f))
			f->enabled = 1;
		else
			f->disabled = 1;
	}

	return 0;
}
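/*
 * Second pass: features that carry a "dependencies" property (an array of
 * phandles to other feature nodes) are resolved recursively below. A feature
 * is only enabled once every dependency has been processed; if any dependency
 * ends up disabled, the dependent feature is marked disabled as well.
 */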
static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
{
	const __be32 *prop;
	int len;
	int nr_deps;
	int i;

	if (f->enabled || f->disabled)
		return;

	prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
	if (!prop) {
		pr_warn("%s: missing dependencies property", f->name);
		return;
	}

	nr_deps = len / sizeof(int);

	for (i = 0; i < nr_deps; i++) {
		unsigned long phandle = be32_to_cpu(prop[i]);
		int j;

		for (j = 0; j < nr_dt_cpu_features; j++) {
			struct dt_cpu_feature *d = &dt_cpu_features[j];

			if (of_get_flat_dt_phandle(d->node) == phandle) {
				cpufeatures_deps_enable(d);
				if (d->disabled) {
					f->disabled = 1;
					return;
				}
			}
		}
	}

	if (cpufeatures_process_feature(f))
		f->enabled = 1;
	else
		f->disabled = 1;
}

static int __init scan_cpufeatures_subnodes(unsigned long node,
					    const char *uname,
					    void *data)
{
	int *count = data;
	process_cpufeatures_node(node, uname, *count);

	(*count)++;

	return 0;
}

static int __init count_cpufeatures_subnodes(unsigned long node,
					     const char *uname,
					     void *data)
{
	int *count = data;

	(*count)++;

	return 0;
}
static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
					    *uname, int depth, void *data)
{
	const __be32 *prop;
	int count, i;
	u32 isa;

	/* We are scanning "ibm,powerpc-cpu-features" nodes only */
	if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
		return 0;

	prop = of_get_flat_dt_prop(node, "isa", NULL);
	if (!prop)
		/* We checked before, "can't happen" */
		return 0;

	isa = be32_to_cpup(prop);

	/* Count and allocate space for cpu features */
	of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
						&nr_dt_cpu_features);
	dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
	if (!dt_cpu_features)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__,
		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
		      PAGE_SIZE);

	cpufeatures_setup_start(isa);

	/* Scan nodes into dt_cpu_features and enable those without deps */
	count = 0;
	of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);

	/* Recursive enable remaining features with dependencies */
	for (i = 0; i < nr_dt_cpu_features; i++) {
		struct dt_cpu_feature *f = &dt_cpu_features[i];

		cpufeatures_deps_enable(f);
	}

	prop = of_get_flat_dt_prop(node, "display-name", NULL);
	if (prop && strlen((char *)prop) != 0) {
		strscpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
		cur_cpu_spec->cpu_name = dt_cpu_name;
	}

	cpufeatures_setup_finished();

	memblock_free(dt_cpu_features,
		      sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);

	return 0;
}

void __init dt_cpu_ftrs_scan(void)
{
	if (!using_dt_cpu_ftrs)
		return;

	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
}