/* cpufeature.h (arm64) — web-viewer banner and fused line-number run removed (extraction artifact) */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  4. */
  5. #ifndef __ASM_CPUFEATURE_H
  6. #define __ASM_CPUFEATURE_H
  7. #include <asm/alternative-macros.h>
  8. #include <asm/cpucaps.h>
  9. #include <asm/cputype.h>
  10. #include <asm/hwcap.h>
  11. #include <asm/sysreg.h>
/* Upper bound on the number of ELF HWCAP feature bits tracked. */
#define MAX_CPU_FEATURES	128
/* Map a short feature name to its KERNEL_HWCAP_* constant. */
#define cpu_feature(x)		KERNEL_HWCAP_ ## x

/*
 * Bit positions of the software feature overrides.
 * NOTE(review): presumably offsets into the early software feature-override
 * value consumed by the override code — confirm against the idreg override
 * parsing.
 */
#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
#define ARM64_SW_FEATURE_OVERRIDE_HVHE		4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF	8
  17. #ifndef __ASSEMBLY__
  18. #include <linux/bug.h>
  19. #include <linux/jump_label.h>
  20. #include <linux/kernel.h>
  21. #include <linux/cpumask.h>
  22. /*
  23. * CPU feature register tracking
  24. *
  25. * The safe value of a CPUID feature field is dependent on the implications
  26. * of the values assigned to it by the architecture. Based on the relationship
  27. * between the values, the features are classified into 3 types - LOWER_SAFE,
  28. * HIGHER_SAFE and EXACT.
  29. *
  30. * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
  31. * for HIGHER_SAFE. It is expected that all CPUs have the same value for
  32. * a field when EXACT is specified, failing which, the safe value specified
  33. * in the table is chosen.
  34. */
/*
 * How the system-wide "safe" value of a CPUID feature field is derived
 * from the per-CPU values (see the comment block above).
 */
enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};
/* Boolean shorthands used when populating struct arm64_ftr_bits tables. */
#define FTR_STRICT	true	/* SANITY check strict matching required */
#define FTR_NONSTRICT	false	/* SANITY check ignored */
#define FTR_SIGNED	true	/* Value should be treated as signed */
#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
#define FTR_VISIBLE	true	/* Feature visible to the user space */
#define FTR_HIDDEN	false	/* Feature is hidden from the user */

/* Visibility that tracks a Kconfig option: visible only when enabled. */
#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
/* Describes one bitfield of a CPUID feature register. */
struct arm64_ftr_bits {
	bool		sign;		/* Value is signed ? */
	bool		visible;	/* Exposed to userspace ? */
	bool		strict;		/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;		/* How to combine per-CPU values */
	u8		shift;		/* Field position within the register */
	u8		width;		/* Field width in bits */
	s64		safe_val;	/* safe value for FTR_EXACT features */
};
/*
 * Describe the early feature override to the core override code:
 *
 * @val		Values that are to be merged into the final
 *		sanitised value of the register. Only the bitfields
 *		set to 1 in @mask are valid
 * @mask	Mask of the features that are overridden by @val
 *
 * A @mask field set to full-1 indicates that the corresponding field
 * in @val is a valid override.
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-0 denotes that this field has no override
 *
 * A @mask field set to full-0 with the corresponding @val field set
 * to full-1 denotes that this field has an invalid override.
 */
struct arm64_ftr_override {
	u64		val;
	u64		mask;
};
/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
 * @sys_val		Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;
	u64				user_mask;	/* Bits visible to userspace */
	u64				sys_val;
	u64				user_val;	/* Value reported to userspace */
	struct arm64_ftr_override	*override;	/* Early boot-time override, if any */
	const struct arm64_ftr_bits	*ftr_bits;	/* Per-field descriptors */
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  94. /*
  95. * CPU capabilities:
  96. *
  97. * We use arm64_cpu_capabilities to represent system features, errata work
  98. * arounds (both used internally by kernel and tracked in system_cpucaps) and
  99. * ELF HWCAPs (which are exposed to user).
  100. *
  101. * To support systems with heterogeneous CPUs, we need to make sure that we
  102. * detect the capabilities correctly on the system and take appropriate
  103. * measures to ensure there are no incompatibilities.
  104. *
  105. * This comment tries to explain how we treat the capabilities.
  106. * Each capability has the following list of attributes :
  107. *
  108. * 1) Scope of Detection : The system detects a given capability by
  109. * performing some checks at runtime. This could be, e.g, checking the
  110. * value of a field in CPU ID feature register or checking the cpu
  111. * model. The capability provides a call back ( @matches() ) to
  112. * perform the check. Scope defines how the checks should be performed.
  113. * There are three cases:
  114. *
  115. * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
  116. * matches. This implies, we have to run the check on all the
  117. * booting CPUs, until the system decides that state of the
  118. * capability is finalised. (See section 2 below)
  119. * Or
  120. * b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
  121. * matches. This implies, we run the check only once, when the
  122. * system decides to finalise the state of the capability. If the
  123. * capability relies on a field in one of the CPU ID feature
  124. * registers, we use the sanitised value of the register from the
  125. * CPU feature infrastructure to make the decision.
  126. * Or
  127. * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
  128. * feature. This category is for features that are "finalised"
  129. * (or used) by the kernel very early even before the SMP cpus
  130. * are brought up.
  131. *
  132. * The process of detection is usually denoted by "update" capability
  133. * state in the code.
  134. *
  135. * 2) Finalise the state : The kernel should finalise the state of a
  136. * capability at some point during its execution and take necessary
  137. * actions if any. Usually, this is done, after all the boot-time
  138. * enabled CPUs are brought up by the kernel, so that it can make
  139. * better decision based on the available set of CPUs. However, there
  140. * are some special cases, where the action is taken during the early
  141. * boot by the primary boot CPU. (e.g, running the kernel at EL2 with
  142. * Virtualisation Host Extensions). The kernel usually disallows any
  143. * changes to the state of a capability once it finalises the capability
  144. * and takes any action, as it may be impossible to execute the actions
  145. * safely. A CPU brought up after a capability is "finalised" is
  146. * referred to as "Late CPU" w.r.t the capability. e.g, all secondary
  147. * CPUs are treated "late CPUs" for capabilities determined by the boot
  148. * CPU.
  149. *
  150. * At the moment there are two passes of finalising the capabilities.
  151. * a) Boot CPU scope capabilities - Finalised by primary boot CPU via
  152. * setup_boot_cpu_capabilities().
  153. * b) Everything except (a) - Run via setup_system_capabilities().
  154. *
  155. * 3) Verification: When a CPU is brought online (e.g, by user or by the
  156. * kernel), the kernel should make sure that it is safe to use the CPU,
  157. * by verifying that the CPU is compliant with the state of the
  158. * capabilities finalised already. This happens via :
  159. *
  160. * secondary_start_kernel()-> check_local_cpu_capabilities()
  161. *
  162. * As explained in (2) above, capabilities could be finalised at
  163. * different points in the execution. Each newly booted CPU is verified
  164. * against the capabilities that have been finalised by the time it
  165. * boots.
  166. *
  167. * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
  168. * except for the primary boot CPU.
  169. *
  170. * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
  171. * user after the kernel boot are verified against the capability.
  172. *
  173. * If there is a conflict, the kernel takes an action, based on the
  174. * severity (e.g, a CPU could be prevented from booting or cause a
  175. * kernel panic). The CPU is allowed to "affect" the state of the
  176. * capability, if it has not been finalised already. See section 5
  177. * for more details on conflicts.
  178. *
  179. * 4) Action: As mentioned in (2), the kernel can take an action for each
  180. * detected capability, on all CPUs on the system. Appropriate actions
  181. * include, turning on an architectural feature, modifying the control
  182. * registers (e.g, SCTLR, TCR etc.) or patching the kernel via
  183. * alternatives. The kernel patching is batched and performed at later
  184. * point. The actions are always initiated only after the capability
 * is finalised. This is usually denoted by "enabling" the capability.
  186. * The actions are initiated as follows :
  187. * a) Action is triggered on all online CPUs, after the capability is
  188. * finalised, invoked within the stop_machine() context from
 * enable_cpu_capabilities().
  190. *
  191. * b) Any late CPU, brought up after (1), the action is triggered via:
  192. *
  193. * check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
  194. *
  195. * 5) Conflicts: Based on the state of the capability on a late CPU vs.
  196. * the system state, we could have the following combinations :
  197. *
  198. * x-----------------------------x
  199. * | Type | System | Late CPU |
  200. * |-----------------------------|
  201. * | a | y | n |
  202. * |-----------------------------|
  203. * | b | n | y |
  204. * x-----------------------------x
  205. *
  206. * Two separate flag bits are defined to indicate whether each kind of
  207. * conflict can be allowed:
  208. * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
  209. * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
  210. *
  211. * Case (a) is not permitted for a capability that the system requires
  212. * all CPUs to have in order for the capability to be enabled. This is
  213. * typical for capabilities that represent enhanced functionality.
  214. *
  215. * Case (b) is not permitted for a capability that must be enabled
  216. * during boot if any CPU in the system requires it in order to run
  217. * safely. This is typical for erratum work arounds that cannot be
  218. * enabled after the corresponding capability is finalised.
  219. *
  220. * In some non-typical cases either both (a) and (b), or neither,
  221. * should be permitted. This can be described by including neither
  222. * or both flags in the capability's type field.
  223. *
  224. * In case of a conflict, the CPU is prevented from booting. If the
  225. * ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
  226. * then a kernel panic is triggered.
  227. */
/*
 * Decide how the capability is detected.
 * On any local CPU vs System wide vs the primary boot CPU
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * The capability is detected on the Boot CPU and is used by kernel
 * during early boot. i.e, the capability should be "detected" and
 * "enabled" as early as possibly on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU		((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

/* Shorthand aliases used by the capability tables and matches() helpers. */
#define SCOPE_SYSTEM	ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU	ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU	ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL	ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when system
 * hasn't already enabled it ?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when system has it */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system requires it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE		\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 * NOTE: this means that a late CPU with the feature will *not* cause the
 * capability to be advertised by cpus_have_*cap()!
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is Ok for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU. In
 * case of a conflict, a kernel panic is triggered.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE	\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
/*
 * CPU feature used early in the boot based on the boot CPU. It is safe for a
 * late CPU to have this feature even though the boot CPU hasn't enabled it,
 * although the feature will not be used by Linux in this case. If the boot CPU
 * has enabled this feature already, then every late CPU must have it.
 */
#define ARM64_CPUCAP_BOOT_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * Descriptor for a single CPU capability (feature, erratum workaround or
 * ELF HWCAP) — see the large comment block above for detection, finalisation,
 * verification and conflict semantics.
 */
struct arm64_cpu_capabilities {
	const char *desc;	/* Human-readable name, printed at detection */
	u16 capability;		/* Index into system_cpucaps/boot_cpucaps */
	u16 type;		/* ARM64_CPUCAP_* scope and conflict flags */
	/* Detection callback; @scope is one of the SCOPE_* values. */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Take the appropriate actions to configure this capability
	 * for this CPU. If the capability is detected by the kernel
	 * this will be called on all the CPUs in the system,
	 * including the hotplugged CPUs, regardless of whether the
	 * capability is available on that specific CPU. This is
	 * useful for some capabilities (e.g, working around CPU
	 * errata), where all the CPUs must take some action (e.g,
	 * changing system control/configuration). Thus, if an action
	 * is required only if the CPU has the capability, then the
	 * routine must check it before taking any action.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* To be used for erratum handling only */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature register checking */
			u32 sys_reg;		/* Encoding of the ID register */
			u8 field_pos;		/* Bit position of the field */
			u8 field_width;		/* Width of the field in bits */
			u8 min_field_value;	/* Minimum value to match */
			u8 max_field_value;	/* Maximum value to match */
			u8 hwcap_type;		/* Which ELF HWCAP set, if any */
			bool sign;		/* Field is signed ? */
			unsigned long hwcap;	/* HWCAP bit to expose */
		};
	};

	/*
	 * An optional list of "matches/cpu_enable" pair for the same
	 * "capability" of the same "type" as described by the parent.
	 * Only matches(), cpu_enable() and fields relevant to these
	 * methods are significant in the list. The cpu_enable is
	 * invoked only if the corresponding entry "matches()".
	 * However, if a cpu_enable() method is associated
	 * with multiple matches(), care should be taken that either
	 * the match criteria are mutually exclusive, or that the
	 * method is robust against being called multiple times.
	 */
	const struct arm64_cpu_capabilities *match_list;
	const struct cpumask *cpus;
};
  360. static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
  361. {
  362. return cap->type & ARM64_CPUCAP_SCOPE_MASK;
  363. }
  364. /*
  365. * Generic helper for handling capabilities with multiple (match,enable) pairs
  366. * of call backs, sharing the same capability bit.
  367. * Iterate over each entry to see if at least one matches.
  368. */
  369. static inline bool
  370. cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
  371. int scope)
  372. {
  373. const struct arm64_cpu_capabilities *caps;
  374. for (caps = entry->match_list; caps->matches; caps++)
  375. if (caps->matches(caps, scope))
  376. return true;
  377. return false;
  378. }
/* True when compiled as part of the VHE hypervisor object. */
static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}

/* True when compiled as part of the nVHE hypervisor object. */
static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}

/* True when compiled as part of either hypervisor object. */
static __always_inline bool is_hyp_code(void)
{
	return is_vhe_hyp_code() || is_nvhe_hyp_code();
}
/* Bitmaps of detected capabilities: system-wide and boot-CPU scoped. */
extern DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
extern DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

/* Iterate over every capability bit set in system_cpucaps. */
#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, system_cpucaps, ARM64_NCAPS)

bool this_cpu_has_cap(unsigned int cap);
void cpu_set_feature(unsigned int num);
bool cpu_have_feature(unsigned int num);
unsigned long cpu_get_elf_hwcap(void);
unsigned long cpu_get_elf_hwcap2(void);

/* Convenience wrappers taking the short HWCAP name instead of the number. */
#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
/* Have the boot-CPU-scope capabilities been finalised yet ? */
static __always_inline bool boot_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_BOOT);
}

/* Have the system-wide capabilities been finalised yet ? */
static __always_inline bool system_capabilities_finalized(void)
{
	return alternative_has_cap_likely(ARM64_ALWAYS_SYSTEM);
}
/*
 * Test for a capability with a runtime check.
 *
 * Before the capability is detected, this returns false.
 */
static __always_inline bool cpus_have_cap(unsigned int num)
{
	/* Constant caps known to be impossible fold to false at compile time */
	if (__builtin_constant_p(num) && !cpucap_is_possible(num))
		return false;
	if (num >= ARM64_NCAPS)
		return false;
	return arch_test_bit(num, system_cpucaps);
}
/*
 * Test for a capability without a runtime check.
 *
 * Before boot capabilities are finalized, this will BUG().
 * After boot capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_boot_cap(int num)
{
	if (boot_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();	/* Caller ran too early in boot */
}
/*
 * Test for a capability without a runtime check.
 *
 * Before system capabilities are finalized, this will BUG().
 * After system capabilities are finalized, this is patched to avoid a runtime
 * check.
 *
 * @num must be a compile-time constant.
 */
static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return alternative_has_cap_unlikely(num);
	else
		BUG();	/* Caller ran too early in boot */
}
  457. static inline int __attribute_const__
  458. cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
  459. {
  460. return (s64)(features << (64 - width - field)) >> (64 - width);
  461. }
  462. static inline int __attribute_const__
  463. cpuid_feature_extract_signed_field(u64 features, int field)
  464. {
  465. return cpuid_feature_extract_signed_field_width(features, field, 4);
  466. }
  467. static __always_inline unsigned int __attribute_const__
  468. cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
  469. {
  470. return (u64)(features << (64 - width - field)) >> (64 - width);
  471. }
  472. static __always_inline unsigned int __attribute_const__
  473. cpuid_feature_extract_unsigned_field(u64 features, int field)
  474. {
  475. return cpuid_feature_extract_unsigned_field_width(features, field, 4);
  476. }
/*
 * Fields that identify the version of the Performance Monitors Extension do
 * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
 * "Alternative ID scheme used for the Performance Monitors Extension version".
 *
 * Clamp the 4-bit field at @field in @features to at most @cap, treating the
 * IMPLEMENTATION DEFINED encoding as "not implemented". Returns the
 * (possibly modified) @features value.
 */
static inline u64 __attribute_const__
cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
{
	u64 val = cpuid_feature_extract_unsigned_field(features, field);
	u64 mask = GENMASK_ULL(field + 3, field);

	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
	if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val = 0;

	/* Rewrite the field with @cap when the reported value exceeds it */
	if (val > cap) {
		features &= ~mask;
		features |= (cap << field) & mask;
	}

	return features;
}
/* Bitmask covering the register field described by @ftrp (shift/width). */
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}
  500. static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
  501. {
  502. return (reg->user_val | (reg->sys_val & reg->user_mask));
  503. }
/*
 * Extract a field of the given @width and @sign-edness from @features.
 * A zero width is a table bug; warn once and fall back to the standard
 * 4-bit field width.
 */
static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
	if (WARN_ON_ONCE(!width))
		width = 4;
	return (sign) ?
		cpuid_feature_extract_signed_field_width(features, field, width) :
		cpuid_feature_extract_unsigned_field_width(features, field, width);
}
  513. static inline int __attribute_const__
  514. cpuid_feature_extract_field(u64 features, int field, bool sign)
  515. {
  516. return cpuid_feature_extract_field_width(features, field, 4, sign);
  517. }
/* Extract the field described by @ftrp from the raw register value @val. */
static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
	return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}
/* Mixed-endian support at EL0, per ID_AA64MMFR0_EL1 BIGEND/BIGENDEL0. */
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
	return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
}
/* AArch32 supported at EL1, per the ID_AA64PFR0_EL1.EL1 field. */
static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

	return val == ID_AA64PFR0_EL1_EL1_AARCH32;
}

/* AArch32 supported at EL0, per the ID_AA64PFR0_EL1.EL0 field. */
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

	return val == ID_AA64PFR0_EL1_EL0_AARCH32;
}
/* SVE implemented, per a non-zero ID_AA64PFR0_EL1.SVE field. */
static inline bool id_aa64pfr0_sve(u64 pfr0)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);

	return val > 0;
}

/* SME implemented, per a non-zero ID_AA64PFR1_EL1.SME field. */
static inline bool id_aa64pfr1_sme(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);

	return val > 0;
}

/* MTE at level MTE2 or higher, per the ID_AA64PFR1_EL1.MTE field. */
static inline bool id_aa64pfr1_mte(u64 pfr1)
{
	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);

	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
}
/* Boot-time setup entry points and sanitised-register accessors (cpufeature.c). */
void __init setup_boot_cpu_features(void);
void __init setup_system_features(void);
void __init setup_user_features(void);

void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);
/* Mixed-endian EL0 support on *this* CPU (reads the local ID register). */
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
/*
 * CSV2 version 3 (CSV2p3) support: read the local register for
 * SCOPE_LOCAL_CPU, otherwise the system-wide sanitised value.
 */
static inline bool supports_csv2p3(int scope)
{
	u64 pfr0;
	u8 csv2_val;

	if (scope == SCOPE_LOCAL_CPU)
		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
	else
		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
							ID_AA64PFR0_EL1_CSV2_SHIFT);
	return csv2_val == 3;
}
/*
 * CLRBHB instruction support: local register for SCOPE_LOCAL_CPU,
 * sanitised system value otherwise. Non-zero field means supported.
 */
static inline bool supports_clearbhb(int scope)
{
	u64 isar2;

	if (scope == SCOPE_LOCAL_CPU)
		isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
	else
		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	return cpuid_feature_extract_unsigned_field(isar2,
						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);

DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

/*
 * 32-bit EL0 is supported either uniformly (sanitised PFR0 says so) or
 * on a subset of CPUs (the mismatched_32bit_el0 static key is set).
 */
static inline bool system_supports_32bit_el0(void)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
	       id_aa64pfr0_32bit_el0(pfr0);
}
/* 4KB translation granule supported system-wide (TGRAN4 within valid range). */
static inline bool system_supports_4kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 =	read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN4_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
}

/* 64KB translation granule supported system-wide (TGRAN64 within valid range). */
static inline bool system_supports_64kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 =	read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN64_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
}

/* 16KB translation granule supported system-wide (TGRAN16 within valid range). */
static inline bool system_supports_16kb_granule(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 =	read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_TGRAN16_SHIFT);

	return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
	       (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
}
/* Mixed-endian EL0 supported system-wide (sanitised MMFR0 view). */
static inline bool system_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

/* Mixed-endian supported at all ELs, per sanitised ID_AA64MMFR0_EL1.BIGEND. */
static inline bool system_supports_mixed_endian(void)
{
	u64 mmfr0;
	u32 val;

	mmfr0 =	read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	val = cpuid_feature_extract_unsigned_field(mmfr0,
						ID_AA64MMFR0_EL1_BIGEND_SHIFT);

	return val == 0x1;
}
  635. static __always_inline bool system_supports_fpsimd(void)
  636. {
  637. return alternative_has_cap_likely(ARM64_HAS_FPSIMD);
  638. }
  639. static inline bool system_uses_hw_pan(void)
  640. {
  641. return alternative_has_cap_unlikely(ARM64_HAS_PAN);
  642. }
  643. static inline bool system_uses_ttbr0_pan(void)
  644. {
  645. return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
  646. !system_uses_hw_pan();
  647. }
  648. static __always_inline bool system_supports_sve(void)
  649. {
  650. return alternative_has_cap_unlikely(ARM64_SVE);
  651. }
  652. static __always_inline bool system_supports_sme(void)
  653. {
  654. return alternative_has_cap_unlikely(ARM64_SME);
  655. }
  656. static __always_inline bool system_supports_sme2(void)
  657. {
  658. return alternative_has_cap_unlikely(ARM64_SME2);
  659. }
/* SME FA64 (ARM64_SME_FA64 capability) set system-wide. */
static __always_inline bool system_supports_fa64(void)
{
	return alternative_has_cap_unlikely(ARM64_SME_FA64);
}
/* TPIDR2 support tracks SME support exactly. */
static __always_inline bool system_supports_tpidr2(void)
{
	return system_supports_sme();
}
/* FPMR (ARM64_HAS_FPMR capability) set system-wide. */
static __always_inline bool system_supports_fpmr(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
}
/* CNP (ARM64_HAS_CNP capability) set system-wide. */
static __always_inline bool system_supports_cnp(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
}
/* Address authentication, checked against the boot-finalised capability set. */
static inline bool system_supports_address_auth(void)
{
	return cpus_have_final_boot_cap(ARM64_HAS_ADDRESS_AUTH);
}
/* Generic (PACGA) authentication capability set system-wide. */
static inline bool system_supports_generic_auth(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GENERIC_AUTH);
}
  684. static inline bool system_has_full_ptr_auth(void)
  685. {
  686. return system_supports_address_auth() && system_supports_generic_auth();
  687. }
/* GIC priority masking (ARM64_HAS_GIC_PRIO_MASKING) in use system-wide. */
static __always_inline bool system_uses_irq_prio_masking(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}
/* MTE (ARM64_MTE capability) usable system-wide. */
static inline bool system_supports_mte(void)
{
	return alternative_has_cap_unlikely(ARM64_MTE);
}
  696. static inline bool system_has_prio_mask_debugging(void)
  697. {
  698. return IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING) &&
  699. system_uses_irq_prio_masking();
  700. }
/* BTI (ARM64_BTI capability) set system-wide. */
static inline bool system_supports_bti(void)
{
	return cpus_have_final_cap(ARM64_BTI);
}
  705. static inline bool system_supports_bti_kernel(void)
  706. {
  707. return IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
  708. cpus_have_final_boot_cap(ARM64_BTI);
  709. }
/* Range-based TLB maintenance (ARM64_HAS_TLB_RANGE) available system-wide. */
static inline bool system_supports_tlb_range(void)
{
	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
}
/* LPA2 (ARM64_HAS_LPA2 capability) set system-wide. */
static inline bool system_supports_lpa2(void)
{
	return cpus_have_final_cap(ARM64_HAS_LPA2);
}
  718. static inline bool system_supports_poe(void)
  719. {
  720. return IS_ENABLED(CONFIG_ARM64_POE) &&
  721. alternative_has_cap_unlikely(ARM64_HAS_S1POE);
  722. }
  723. int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
  724. bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
  725. static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
  726. {
  727. switch (parange) {
  728. case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
  729. case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
  730. case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
  731. case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
  732. case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
  733. case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
  734. case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
  735. /*
  736. * A future PE could use a value unknown to the kernel.
  737. * However, by the "D10.1.4 Principles of the ID scheme
  738. * for fields in ID registers", ARM DDI 0487C.a, any new
  739. * value is guaranteed to be higher than what we know already.
  740. * As a safe limit, we return the limit supported by the kernel.
  741. */
  742. default: return CONFIG_ARM64_PA_BITS;
  743. }
  744. }
  745. /* Check whether hardware update of the Access flag is supported */
  746. static inline bool cpu_has_hw_af(void)
  747. {
  748. u64 mmfr1;
  749. if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
  750. return false;
  751. /*
  752. * Use cached version to avoid emulated msr operation on KVM
  753. * guests.
  754. */
  755. mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
  756. return cpuid_feature_extract_unsigned_field(mmfr1,
  757. ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
  758. }
  759. static inline bool cpu_has_pan(void)
  760. {
  761. u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
  762. return cpuid_feature_extract_unsigned_field(mmfr1,
  763. ID_AA64MMFR1_EL1_PAN_SHIFT);
  764. }
#ifdef CONFIG_ARM64_AMU_EXTN
/* Check whether the cpu supports the Activity Monitors Unit (AMU) */
extern bool cpu_has_amu_feat(int cpu);
#else
/* Without CONFIG_ARM64_AMU_EXTN, no CPU ever reports AMU support. */
static inline bool cpu_has_amu_feat(int cpu)
{
	return false;
}
#endif
  774. /* Get a cpu that supports the Activity Monitors Unit (AMU) */
  775. extern int get_cpu_with_amu_feat(void);
  776. static inline unsigned int get_vmid_bits(u64 mmfr1)
  777. {
  778. int vmid_bits;
  779. vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
  780. ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
  781. if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
  782. return 16;
  783. /*
  784. * Return the default here even if any reserved
  785. * value is fetched from the system register.
  786. */
  787. return 8;
  788. }
  789. s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
  790. struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
  791. extern struct arm64_ftr_override id_aa64mmfr0_override;
  792. extern struct arm64_ftr_override id_aa64mmfr1_override;
  793. extern struct arm64_ftr_override id_aa64mmfr2_override;
  794. extern struct arm64_ftr_override id_aa64pfr0_override;
  795. extern struct arm64_ftr_override id_aa64pfr1_override;
  796. extern struct arm64_ftr_override id_aa64zfr0_override;
  797. extern struct arm64_ftr_override id_aa64smfr0_override;
  798. extern struct arm64_ftr_override id_aa64isar1_override;
  799. extern struct arm64_ftr_override id_aa64isar2_override;
  800. extern struct arm64_ftr_override arm64_sw_feature_override;
  801. static inline
  802. u64 arm64_apply_feature_override(u64 val, int feat, int width,
  803. const struct arm64_ftr_override *override)
  804. {
  805. u64 oval = override->val;
  806. /*
  807. * When it encounters an invalid override (e.g., an override that
  808. * cannot be honoured due to a missing CPU feature), the early idreg
  809. * override code will set the mask to 0x0 and the value to non-zero for
  810. * the field in question. In order to determine whether the override is
  811. * valid or not for the field we are interested in, we first need to
  812. * disregard bits belonging to other fields.
  813. */
  814. oval &= GENMASK_ULL(feat + width - 1, feat);
  815. /*
  816. * The override is valid if all value bits are accounted for in the
  817. * mask. If so, replace the masked bits with the override value.
  818. */
  819. if (oval == (oval & override->mask)) {
  820. val &= ~override->mask;
  821. val |= oval;
  822. }
  823. /* Extract the field from the updated value */
  824. return cpuid_feature_extract_unsigned_field(val, feat);
  825. }
  826. static inline bool arm64_test_sw_feature_override(int feat)
  827. {
  828. /*
  829. * Software features are pseudo CPU features that have no underlying
  830. * CPUID system register value to apply the override to.
  831. */
  832. return arm64_apply_feature_override(0, feat, 4,
  833. &arm64_sw_feature_override);
  834. }
/* The NOKASLR software feature override was set (e.g. from the command line). */
static inline bool kaslr_disabled_cmdline(void)
{
	return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOKASLR);
}
  839. u32 get_kvm_ipa_limit(void);
  840. void dump_cpu_features(void);
  841. static inline bool cpu_has_bti(void)
  842. {
  843. if (!IS_ENABLED(CONFIG_ARM64_BTI))
  844. return false;
  845. return arm64_apply_feature_override(read_cpuid(ID_AA64PFR1_EL1),
  846. ID_AA64PFR1_EL1_BT_SHIFT, 4,
  847. &id_aa64pfr1_override);
  848. }
  849. static inline bool cpu_has_pac(void)
  850. {
  851. u64 isar1, isar2;
  852. if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
  853. return false;
  854. isar1 = read_cpuid(ID_AA64ISAR1_EL1);
  855. isar2 = read_cpuid(ID_AA64ISAR2_EL1);
  856. if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_APA_SHIFT, 4,
  857. &id_aa64isar1_override))
  858. return true;
  859. if (arm64_apply_feature_override(isar1, ID_AA64ISAR1_EL1_API_SHIFT, 4,
  860. &id_aa64isar1_override))
  861. return true;
  862. return arm64_apply_feature_override(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT, 4,
  863. &id_aa64isar2_override);
  864. }
  865. static inline bool cpu_has_lva(void)
  866. {
  867. u64 mmfr2;
  868. mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
  869. mmfr2 &= ~id_aa64mmfr2_override.mask;
  870. mmfr2 |= id_aa64mmfr2_override.val;
  871. return cpuid_feature_extract_unsigned_field(mmfr2,
  872. ID_AA64MMFR2_EL1_VARange_SHIFT);
  873. }
  874. static inline bool cpu_has_lpa2(void)
  875. {
  876. #ifdef CONFIG_ARM64_LPA2
  877. u64 mmfr0;
  878. int feat;
  879. mmfr0 = read_sysreg(id_aa64mmfr0_el1);
  880. mmfr0 &= ~id_aa64mmfr0_override.mask;
  881. mmfr0 |= id_aa64mmfr0_override.val;
  882. feat = cpuid_feature_extract_signed_field(mmfr0,
  883. ID_AA64MMFR0_EL1_TGRAN_SHIFT);
  884. return feat >= ID_AA64MMFR0_EL1_TGRAN_LPA2;
  885. #else
  886. return false;
  887. #endif
  888. }
  889. #endif /* __ASSEMBLY__ */
  890. #endif