setup.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/cpu.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>
#include <uapi/linux/mount.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/asserts.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
#include <asm/dsp-impl.h>
#include <soc/arc/mcip.h>
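
/*
 * Empty volatile asm with @x as an in/out operand: the compiler must assume
 * the asm may modify @x, so it reloads the value rather than reusing a cached
 * copy (a per-variable optimization barrier).
 */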
#define FIX_PTR(x)  __asm__ __volatile__(";" : "+r"(x))

unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
int __initdata uboot_magic;
char __initdata *uboot_arg;

const struct machine_desc *machine_desc;

struct task_struct *_current_task[NR_CPUS];	/* For stack switching */
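
/*
 * Scratch summary of the CPU features probed from the Build Config Registers:
 * filled in by arc_cpu_mumbojumbo() and consumed by arc_chk_core_config().
 */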
struct cpuinfo_arc {
	int arcver;
	unsigned int t0:1, t1:1;
	struct {
		unsigned long base;
		unsigned int sz;
	} iccm, dccm;
};

#ifdef CONFIG_ISA_ARCV2

static const struct id_to_str arc_hs_rel[] = {
	/* ID.ARCVER,	Release */
	{ 0x51,		"R2.0" },
	{ 0x52,		"R2.1" },
	{ 0x53,		"R3.0" },
};

static const struct id_to_str arc_hs_ver54_rel[] = {
	/* UARCH.MAJOR,	Release */
	{ 0,		"R3.10a" },
	{ 1,		"R3.50a" },
	{ 2,		"R3.60a" },
	{ 3,		"R4.00a" },
	{ 0xFF,		NULL }
};
#endif
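
/*
 * Decode the ARCompact (ARC700) specific Build Config Registers into a
 * printable summary in @buf and record CCM geometry in @info.
 */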
static int
arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	int n = 0;
#ifdef CONFIG_ISA_ARCOMPACT
	char *cpu_nm, *isa_nm = "ARCompact";
	struct bcr_fp_arcompact fpu_sp, fpu_dp;
	int atomic = 0, be, present;
	int bpu_full, bpu_cache, bpu_pred;
	struct bcr_bpu_arcompact bpu;
	struct bcr_iccm_arcompact iccm;
	struct bcr_dccm_arcompact dccm;
	struct bcr_generic isa;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	if (!isa.ver) {
		/* ISA BCR absent, use Kconfig info */
		atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
	} else {
		/* ARC700_BUILD only has 2 bits of isa info */
		atomic = isa.info & 1;
	}

	be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

	if (info->arcver < 0x34)
		cpu_nm = "ARC750";
	else
		cpu_nm = "ARC770";

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
		       c, cpu_nm, isa_nm,
		       IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL1(be, "[Big-Endian]"));

	READ_BCR(ARC_REG_FP_BCR, fpu_sp);
	READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);

	if (fpu_sp.ver | fpu_dp.ver)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(fpu_sp.ver, "SP "),
			       IS_AVAIL1(fpu_dp.ver, "DP "));

	READ_BCR(ARC_REG_BPU_BCR, bpu);
	bpu_full = bpu.fam ? 1 : 0;
	bpu_cache = 256 << (bpu.ent - 1);
	bpu_pred = 256 << (bpu.ent - 1);

	n += scnprintf(buf + n, len - n,
		       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
		       IS_AVAIL1(bpu_full, "full"),
		       IS_AVAIL1(!bpu_full, "partial"),
		       bpu_cache, bpu_pred);

	READ_BCR(ARC_REG_ICCM_BUILD, iccm);
	if (iccm.ver) {
		info->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
		info->iccm.base = iccm.base << 16;
	}

	READ_BCR(ARC_REG_DCCM_BUILD, dccm);
	if (dccm.ver) {
		unsigned long base;

		info->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */
		base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
		info->dccm.base = base & ~0xF;
	}

	/* ARCompact ISA specific sanity checks */
	present = fpu_dp.ver;	/* SP has no arch visible regs */
	CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
#endif
	return n;
}
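
/*
 * Decode the ARCv2 (HS3x/HS4x) specific Build Config Registers into a
 * printable summary in @buf and record CCM geometry in @info.
 */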
static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	int n = 0;
#ifdef CONFIG_ISA_ARCV2
	const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
	int dual_issue = 0, dual_enb = 0, mpy_opt, present;
	int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
	char mpy_nm[16], lpb_nm[32] = "";	/* lpb_nm is printed even when there is no loop buffer */
	struct bcr_isa_arcv2 isa;
	struct bcr_mpy mpy;
	struct bcr_fp_arcv2 fpu;
	struct bcr_bpu_arcv2 bpu;
	struct bcr_lpb lpb;
	struct bcr_iccm_arcv2 iccm;
	struct bcr_dccm_arcv2 dccm;
	struct bcr_erp erp;

	/*
	 * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
	 * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
	 * releases only update it.
	 */
	if (info->arcver > 0x50 && info->arcver <= 0x53) {
		release = arc_hs_rel[info->arcver - 0x51].str;
	} else {
		const struct id_to_str *tbl;
		struct bcr_uarch_build uarch;

		READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);

		for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
			if (uarch.maj == tbl->id) {
				release = tbl->str;
				break;
			}
		}

		if (uarch.prod == 4) {
			unsigned int exec_ctrl;

			cpu_nm = "HS48";
			dual_issue = 1;

			/* if dual issue hardware, is it enabled ? */
			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
			dual_enb = !(exec_ctrl & 1);
		}
	}

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       c, cpu_nm, release, isa_nm,
		       IS_AVAIL1(isa.be, "[Big-Endian]"),
		       IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));

	READ_BCR(ARC_REG_MPY_BCR, mpy);
	mpy_opt = 2;	/* stock MPY/MPYH */
	if (mpy.dsp)	/* OPT 7-9 */
		mpy_opt = mpy.dsp + 6;

	scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);

	READ_BCR(ARC_REG_FP_V2_BCR, fpu);

	n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
		       IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
		       IS_AVAIL1(mpy.ver, mpy_nm),
		       IS_AVAIL1(isa.div_rem, "div_rem "),
		       IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
		       IS_AVAIL1(fpu.sp, " sp"),
		       IS_AVAIL1(fpu.dp, " dp"));

	READ_BCR(ARC_REG_BPU_BCR, bpu);
	bpu_full = bpu.ft;
	bpu_cache = 256 << bpu.bce;
	bpu_pred = 2048 << bpu.pte;
	bpu_ret_stk = 4 << bpu.rse;

	READ_BCR(ARC_REG_LPB_BUILD, lpb);
	if (lpb.ver) {
		unsigned int ctl;

		ctl = read_aux_reg(ARC_REG_LPB_CTRL);
		scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
			  lpb.entries, IS_DISABLED_RUN(!ctl));
	}

	n += scnprintf(buf + n, len - n,
		       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
		       IS_AVAIL1(bpu_full, "full"),
		       IS_AVAIL1(!bpu_full, "partial"),
		       bpu_cache, bpu_pred, bpu_ret_stk,
		       lpb_nm);

	READ_BCR(ARC_REG_ICCM_BUILD, iccm);
	if (iccm.ver) {
		unsigned long base;

		info->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
		if (iccm.sz00 == 0xF && iccm.sz01 > 0)
			info->iccm.sz <<= iccm.sz01;

		base = read_aux_reg(ARC_REG_AUX_ICCM);
		info->iccm.base = base & 0xF0000000;
	}

	READ_BCR(ARC_REG_DCCM_BUILD, dccm);
	if (dccm.ver) {
		unsigned long base;

		info->dccm.sz = 256 << dccm.sz0;
		if (dccm.sz0 == 0xF && dccm.sz1 > 0)
			info->dccm.sz <<= dccm.sz1;

		base = read_aux_reg(ARC_REG_AUX_DCCM);
		info->dccm.base = base & 0xF0000000;
	}

	/* Error Protection: ECC/Parity */
	READ_BCR(ARC_REG_ERP_BUILD, erp);
	if (erp.ver) {
		struct ctl_erp ctl;

		READ_BCR(ARC_REG_ERP_CTRL, ctl);

		/* inverted bits: 0 means enabled */
		n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
			       IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
			       IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
			       IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
	}

	/* ARCv2 ISA specific sanity checks */
	present = fpu.sp | fpu.dp | mpy.dsp;	/* DSP and/or FPU */
	CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);

	dsp_config_check();
#endif
	return n;
}
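
/*
 * Build the complete per-CPU description string (boot log and /proc/cpuinfo)
 * and fill @info with the bits needed later for sanity checking.
 */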
static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	struct bcr_identity ident;
	struct bcr_timer timer;
	struct bcr_generic bcr;
	struct mcip_bcr mp;
	struct bcr_actionpoint ap;
	unsigned long vec_base;
	int ap_num, ap_full, smart, rtt, n;

	memset(info, 0, sizeof(struct cpuinfo_arc));

	READ_BCR(AUX_IDENTITY, ident);
	info->arcver = ident.family;

	n = scnprintf(buf, len,
		      "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		      ident.family, ident.cpu_id, ident.chip_id);

	if (is_isa_arcompact()) {
		n += arcompact_mumbojumbo(c, info, buf + n, len - n);
	} else if (is_isa_arcv2()) {
		n += arcv2_mumbojumbo(c, info, buf + n, len - n);
	}

	n += arc_mmu_mumbojumbo(c, buf + n, len - n);
	n += arc_cache_mumbojumbo(c, buf + n, len - n);

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	info->t0 = timer.t0;
	info->t1 = timer.t1;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	vec_base = read_aux_reg(AUX_INTR_VEC_BASE);

	n += scnprintf(buf + n, len - n,
		       "Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
		       IS_AVAIL1(timer.t0, "Timer0 "),
		       IS_AVAIL1(timer.t1, "Timer1 "),
		       IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       vec_base);

	READ_BCR(ARC_REG_AP_BCR, ap);
	if (ap.ver) {
		ap_num = 2 << ap.num;
		ap_full = !ap.min;
	}

	READ_BCR(ARC_REG_SMART_BCR, bcr);
	smart = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_RTT_BCR, bcr);
	rtt = bcr.ver ? 1 : 0;

	if (ap.ver | smart | rtt) {
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
			       IS_AVAIL1(smart, "smaRT "),
			       IS_AVAIL1(rtt, "RTT "));
		if (ap.ver) {
			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
				       ap_num,
				       ap_full ? "full" : "min");
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	if (info->dccm.sz || info->iccm.sz)
		n += scnprintf(buf + n, len - n,
			       "Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
			       info->dccm.base, TO_KB(info->dccm.sz),
			       info->iccm.base, TO_KB(info->iccm.sz));

	return buf;
}
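
/*
 * Sanity checks of Kconfig options against probed hardware:
 * chk_opt_strict() warns if hardware exists but the option is disabled, and
 * panics if the option is enabled without the hardware; chk_opt_weak() only
 * panics in the latter case.
 */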
void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
{
	if (hw_exists && !opt_ena)
		pr_warn(" ! Enable %s for working apps\n", opt_name);
	else if (!hw_exists && opt_ena)
		panic("Disable %s, hardware NOT present\n", opt_name);
}

void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
{
	if (!hw_exists && opt_ena)
		panic("Disable %s, hardware NOT present\n", opt_name);
}

/*
 * ISA agnostic sanity checks
 */
static void arc_chk_core_config(struct cpuinfo_arc *info)
{
	if (!info->t0)
		panic("Timer0 is not present!\n");

	if (!info->t1)
		panic("Timer1 is not present!\n");

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be placed arbitrarily in hardware.
	 * Make sure its placement/size matches what Linux is built with.
	 */
	if ((unsigned int)__arc_dccm_base != info->dccm.base)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif
}
/*
 * Initialize and set up the processor core.
 * This is called by all CPUs, so it must not do boot-CPU-only work.
 */
void setup_processor(void)
{
	struct cpuinfo_arc info;
	int c = smp_processor_id();
	char str[512];

	pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
	pr_info("%s", arc_platform_smp_cpuinfo());

	arc_chk_core_config(&info);

	arc_init_IRQ();
	arc_mmu_init();
	arc_cache_init();
}
static inline bool uboot_arg_invalid(unsigned long addr)
{
	/*
	 * Check that it is an untranslated address (although the MMU is not
	 * enabled yet, it being a high address ensures this is not by fluke)
	 */
	if (addr < PAGE_OFFSET)
		return true;

	/* Check that the address doesn't clobber the resident kernel image */
	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}

#define IGNORE_ARGS		"Ignore U-boot args: "

/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE		0
#define UBOOT_TAG_CMDLINE	1
#define UBOOT_TAG_DTB		2

/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE	0
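
/*
 * Act on the tag/argument saved by head.S: a CMDLINE tag appends @uboot_arg
 * to the DT bootargs, a DTB tag uses @uboot_arg as an external device tree,
 * and anything inconsistent falls back to the embedded DTB.
 */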
void __init handle_uboot_args(void)
{
	bool use_embedded_dtb = true;
	bool append_cmdline = false;

	/* check that we know this tag */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_tag != UBOOT_TAG_CMDLINE &&
	    uboot_tag != UBOOT_TAG_DTB) {
		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
		goto ignore_uboot_args;
	}

	if (uboot_magic != UBOOT_MAGIC_VALUE) {
		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
		goto ignore_uboot_args;
	}

	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_arg_invalid((unsigned long)uboot_arg)) {
		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
		goto ignore_uboot_args;
	}

	/* see if U-boot passed an external Device Tree blob */
	if (uboot_tag == UBOOT_TAG_DTB) {
		machine_desc = setup_machine_fdt((void *)uboot_arg);

		/* external Device Tree blob is invalid - use embedded one */
		use_embedded_dtb = !machine_desc;
	}

	if (uboot_tag == UBOOT_TAG_CMDLINE)
		append_cmdline = true;

ignore_uboot_args:

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Embedded DT invalid\n");
	}

	/*
	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
	 * append processing can only happen after.
	 */
	if (append_cmdline) {
		/* Ensure a whitespace between the 2 cmdlines */
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
	}
}
void __init setup_arch(char **cmdline_p)
{
	handle_uboot_args();

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	smp_init_cpus();

	setup_processor();
	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	unflatten_and_copy_device_tree();

	/*
	 * Could be an issue if someone passes the cmdline arg "ro",
	 * but that is unlikely, so keep it as is.
	 */
	root_mountflags &= ~MS_RDONLY;

	arc_unwind_init();
}
/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	timer_probe();
}

static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);
/*
 * Get CPU information for use by the procfs.
 */
#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))

static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *str;
	int cpu_id = ptr_to_cpu(v);
	struct device *cpu_dev = get_cpu_device(cpu_id);
	struct cpuinfo_arc info;
	struct clk *cpu_clk;
	unsigned long freq = 0;

	if (!cpu_online(cpu_id)) {
		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
		goto done;
	}

	str = (char *)__get_free_page(GFP_KERNEL);
	if (!str)
		goto done;

	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
			   cpu_id);
	} else {
		freq = clk_get_rate(cpu_clk);
	}
	if (freq)
		seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
			   freq / 1000000, (freq / 10000) % 100);

	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, arc_platform_smp_cpuinfo());

	free_page((unsigned long)str);
done:
	seq_printf(m, "\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Callback returns cpu-id to iterator for show routine, NULL to stop.
	 * However since NULL is also a valid cpu-id (0), we use a round-about
	 * way to pass it w/o having to kmalloc/free a 2 byte string.
	 * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
	 */
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};
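
/* Register a struct cpu device for each present CPU so it shows up in sysfs */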
static DEFINE_PER_CPU(struct cpu, cpu_topology);

static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_topology, cpu), cpu);

	return 0;
}
subsys_initcall(topology_init);