/* centaur.c - Centaur (VIA / WinChip) x86 CPU detection and initialization */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/sched.h>
  3. #include <linux/sched/clock.h>
  4. #include <asm/cpufeature.h>
  5. #include <asm/e820/api.h>
  6. #include <asm/mtrr.h>
  7. #include <asm/msr.h>
  8. #include "cpu.h"
/*
 * Centaur extended CPUID leaf 0xC0000001 EDX bits for the ACE crypto
 * and RNG units, plus the MSR bits used to enable them -- see init_c3().
 */
#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */

#define RNG_PRESENT (1 << 2)
#define RNG_ENABLED (1 << 3)
#define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */

/*
 * Bits in the VMX capability MSRs, tested by centaur_detect_vmx_virtcap().
 */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020
/*
 * init_c3() - family 6 (VIA C3 and later) specific setup.
 *
 * Probes the Centaur extended CPUID leaf for the ACE h/w crypto and
 * h/w RNG units; when a unit is present but not yet enabled, enables
 * it via its MSR.  Then records the extended feature flags, applies
 * 32-bit model quirks, and detects cache sizes.
 */
static void init_c3(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr(MSR_VIA_FCR, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr(MSR_VIA_RNG, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/*
		 * Store Centaur Extended Feature Flags as word 5 of the
		 * CPU capability bit array.  Re-read the leaf instead of
		 * reusing 'tmp': the MSR writes above may have changed the
		 * ENABLED bits, and the stored word should reflect the
		 * post-enable state.
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}
#ifdef CONFIG_X86_32
	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >= 6 && c->x86_model <= 13) {
		rdmsr(MSR_VIA_FCR, lo, hi);
		/* FCR bits 1 and 7 -- per the CX8/PGE note above */
		lo |= (1<<1 | 1<<7);
		wrmsr(MSR_VIA_FCR, lo, hi);
		set_cpu_cap(c, X86_FEATURE_CX8);
	}

	/* Before Nehemiah, the C3's had 3dNOW! */
	if (c->x86_model >= 6 && c->x86_model < 9)
		set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
	/* Later family 6 models: wider cache alignment, fast REP string ops */
	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}

	cpu_detect_cache_sizes(c);
}
/*
 * Bit masks for MSR_IDT_FCR1, the WinChip feature control register,
 * accumulated into fcr_set/fcr_clr by init_centaur() below.  The names
 * appear to follow an E* = enable / D* = disable convention (note how
 * only DPDC ends up in fcr_clr) -- NOTE(review): confirm against IDT
 * WinChip MSR documentation.  DTLOCK and EDCTLB intentionally share
 * bit 8.
 */
enum {
	ECX8		= 1<<1,
	EIERRINT	= 1<<2,
	DPM		= 1<<3,
	DMCE		= 1<<4,
	DSTPCLK		= 1<<5,
	ELINEAR		= 1<<6,
	DSMC		= 1<<7,
	DTLOCK		= 1<<8,
	EDCTLB		= 1<<8,
	EMMX		= 1<<9,
	DPDC		= 1<<11,
	EBRPRED		= 1<<12,
	DIC		= 1<<13,
	DDC		= 1<<14,
	DNA		= 1<<15,
	ERETSTK		= 1<<16,
	E2MMX		= 1<<19,
	EAMD3D		= 1<<20,
};
/*
 * early_init_centaur() - ->c_early_init hook, shared by all families.
 *
 * Sets per-family synthetic capability bits that must be known before
 * the main init path runs.
 */
static void early_init_centaur(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
#ifdef CONFIG_X86_32
	case 5:
		/* Emulate MTRRs using Centaur's MCR. */
		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
		break;
#endif
	case 6:
		/* Later C7-class parts have a constant-rate TSC */
		if (c->x86_model >= 0xf)
			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		break;
	}
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
	/*
	 * x86_power bit 8 set => advertise constant and nonstop TSC
	 * (invariant-TSC style behavior).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}
}
/*
 * centaur_detect_vmx_virtcap() - decode the VMX capability MSRs and set
 * the matching synthetic feature bits (TPR shadow, virtual NMIs,
 * FlexPriority, EPT, VPID).
 *
 * NOTE(review): ORing the high and low MSR halves treats a control as
 * supported if its bit is set in either half; the "allowed-1" settings
 * live in the high dword -- confirm against the Intel SDM VMX
 * capability MSR layout.
 */
static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;

	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);

	/* Secondary controls exist only if the activation bit is set */
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;

		/* FlexPriority needs virtual-APIC plus the TPR shadow */
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
/*
 * init_centaur() - main ->c_init hook for Centaur CPUs.
 *
 * Family 5 (WinChip C6/2/2A/2B/3): programs MSR_IDT_FCR1 feature bits
 * per model, enables MTRR emulation via Centaur MCRs, reports CX8 and
 * 3DNow!, and derives L1 cache size from extended CPUID leaf
 * 0x80000005.  Family 6 (VIA C3 and later): defers to init_c3().
 */
static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	char *name;
	u32 fcr_set = 0;
	u32 fcr_clr = 0;
	u32 lo, hi, newlo;
	u32 aa, bb, cc, dd;

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);
#endif
	early_init_centaur(c);
	init_intel_cacheinfo(c);
	detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->cpuid_level > 9) {
		unsigned int eax = cpuid_eax(10);

		/*
		 * Check for version and the number of counters
		 * Version(eax[7:0]) can't be 0;
		 * Counters(eax[15:8]) should be greater than 1;
		 */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	switch (c->x86) {
#ifdef CONFIG_X86_32
	case 5:
		/* Per-model name plus FCR bits to set/clear */
		switch (c->x86_model) {
		case 4:
			name = "C6";
			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
			fcr_clr = DPDC;
			pr_notice("Disabling bugged TSC.\n");
			clear_cpu_cap(c, X86_FEATURE_TSC);
			break;
		case 8:
			/* WinChip 2 variants are told apart by stepping */
			switch (c->x86_stepping) {
			default:
				name = "2";
				break;
			case 7 ... 9:
				name = "2A";
				break;
			case 10 ... 15:
				name = "2B";
				break;
			}
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
			break;
		case 9:
			name = "3";
			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
				  E2MMX|EAMD3D;
			fcr_clr = DPDC;
			break;
		default:
			name = "??";
		}

		/* Apply the set/clear masks, but only write if they change FCR1 */
		rdmsr(MSR_IDT_FCR1, lo, hi);
		newlo = (lo|fcr_set) & (~fcr_clr);

		if (newlo != lo) {
			pr_info("Centaur FCR was 0x%X now 0x%X\n",
				lo, newlo);
			wrmsr(MSR_IDT_FCR1, newlo, hi);
		} else {
			pr_info("Centaur FCR is 0x%X\n", lo);
		}
		/* Emulate MTRRs using Centaur's MCR. */
		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
		/* Report CX8 */
		set_cpu_cap(c, X86_FEATURE_CX8);
		/* Set 3DNow! on Winchip 2 and above. */
		if (c->x86_model >= 8)
			set_cpu_cap(c, X86_FEATURE_3DNOW);
		/* See if we can find out some more. */
		if (cpuid_eax(0x80000000) >= 0x80000005) {
			/* Yes, we can. */
			cpuid(0x80000005, &aa, &bb, &cc, &dd);
			/* Add L1 data and code cache sizes. */
			c->x86_cache_size = (cc>>24)+(dd>>24);
		}
		sprintf(c->x86_model_id, "WinChip %s", name);
		break;
#endif
	case 6:
		init_c3(c);
		break;
	}
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

	if (cpu_has(c, X86_FEATURE_VMX))
		centaur_detect_vmx_virtcap(c);
}
  230. #ifdef CONFIG_X86_32
  231. static unsigned int
  232. centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
  233. {
  234. /* VIA C3 CPUs (670-68F) need further shifting. */
  235. if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
  236. size >>= 8;
  237. /*
  238. * There's also an erratum in Nehemiah stepping 1, which
  239. * returns '65KB' instead of '64KB'
  240. * - Note, it seems this may only be in engineering samples.
  241. */
  242. if ((c->x86 == 6) && (c->x86_model == 9) &&
  243. (c->x86_stepping == 1) && (size == 65))
  244. size -= 1;
  245. return size;
  246. }
  247. #endif
/*
 * Vendor descriptor for CPUs identifying as "CentaurHauls"; hooked into
 * the common x86 setup code via cpu_dev_register().
 */
static const struct cpu_dev centaur_cpu_dev = {
	.c_vendor	= "Centaur",
	.c_ident	= { "CentaurHauls" },
	.c_early_init	= early_init_centaur,
	.c_init		= init_centaur,
#ifdef CONFIG_X86_32
	/* corrects legacy-CPUID cache sizes, see centaur_size_cache() */
	.legacy_cache_size = centaur_size_cache,
#endif
	.c_x86_vendor	= X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);