mc.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/delay.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/export.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/of.h>
  13. #include <linux/of_platform.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/slab.h>
  16. #include <linux/sort.h>
  17. #include <linux/tegra-icc.h>
  18. #include <soc/tegra/fuse.h>
  19. #include "mc.h"
/*
 * Table of supported memory-controller generations. Each entry binds a
 * device-tree compatible string to its per-SoC description; entries are
 * compiled in only when the corresponding SoC family is enabled in Kconfig.
 */
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
	{ .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_194_SOC
	{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_234_SOC
	{ .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
  51. static void tegra_mc_devm_action_put_device(void *data)
  52. {
  53. struct tegra_mc *mc = data;
  54. put_device(mc->dev);
  55. }
/**
 * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
 * @dev: device pointer for the consumer device
 *
 * This function will search for the Memory Controller node in a device-tree
 * and retrieve the Memory Controller handle.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
 */
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct tegra_mc *mc;
	int err;

	/* consumers point at the MC via the "nvidia,memory-controller" phandle */
	np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		/* MC device exists but has not finished probing yet */
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	/*
	 * Keep the device reference taken by of_find_device_by_node() for the
	 * consumer's lifetime; it is dropped automatically on consumer unbind.
	 */
	err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
	if (err)
		return ERR_PTR(err);

	return mc;
}
EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
  89. int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
  90. {
  91. if (mc->soc->ops && mc->soc->ops->probe_device)
  92. return mc->soc->ops->probe_device(mc, dev);
  93. return 0;
  94. }
  95. EXPORT_SYMBOL_GPL(tegra_mc_probe_device);
  96. int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
  97. phys_addr_t *base, u64 *size)
  98. {
  99. u32 offset;
  100. if (id < 1 || id >= mc->soc->num_carveouts)
  101. return -EINVAL;
  102. if (id < 6)
  103. offset = 0xc0c + 0x50 * (id - 1);
  104. else
  105. offset = 0x2004 + 0x50 * (id - 6);
  106. *base = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x0);
  107. #ifdef CONFIG_PHYS_ADDR_T_64BIT
  108. *base |= (phys_addr_t)mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x4) << 32;
  109. #endif
  110. if (size)
  111. *size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17;
  112. return 0;
  113. }
  114. EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info);
  115. static int tegra_mc_block_dma_common(struct tegra_mc *mc,
  116. const struct tegra_mc_reset *rst)
  117. {
  118. unsigned long flags;
  119. u32 value;
  120. spin_lock_irqsave(&mc->lock, flags);
  121. value = mc_readl(mc, rst->control) | BIT(rst->bit);
  122. mc_writel(mc, value, rst->control);
  123. spin_unlock_irqrestore(&mc->lock, flags);
  124. return 0;
  125. }
  126. static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
  127. const struct tegra_mc_reset *rst)
  128. {
  129. return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
  130. }
  131. static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
  132. const struct tegra_mc_reset *rst)
  133. {
  134. unsigned long flags;
  135. u32 value;
  136. spin_lock_irqsave(&mc->lock, flags);
  137. value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
  138. mc_writel(mc, value, rst->control);
  139. spin_unlock_irqrestore(&mc->lock, flags);
  140. return 0;
  141. }
  142. static int tegra_mc_reset_status_common(struct tegra_mc *mc,
  143. const struct tegra_mc_reset *rst)
  144. {
  145. return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
  146. }
/*
 * Hot-reset ops implemented via the per-client rst->control/rst->status
 * register bits; shared by SoCs that use this common register layout.
 */
const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};
/* Map a reset_controller_dev back to its containing tegra_mc. */
static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}
  157. static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
  158. unsigned long id)
  159. {
  160. unsigned int i;
  161. for (i = 0; i < mc->soc->num_resets; i++)
  162. if (mc->soc->resets[i].id == id)
  163. return &mc->soc->resets[i];
  164. return NULL;
  165. }
/*
 * Assert a memory-client hot reset: block the client's new DMA requests,
 * wait for its outstanding DMA to drain (polled, bounded by 500 retries),
 * then clear requests still queued before arbitration. Each step is
 * optional per-SoC and skipped when the corresponding op is not provided.
 */
static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int retries = 500;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	/* DMA flushing will fail if reset is already asserted */
	if (rst_ops->reset_status) {
		/* check whether reset is asserted */
		if (rst_ops->reset_status(mc, rst))
			return 0;
	}

	if (rst_ops->block_dma) {
		/* block clients DMA requests */
		err = rst_ops->block_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to block %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->dma_idling) {
		/* wait for completion of the outstanding DMA requests */
		while (!rst_ops->dma_idling(mc, rst)) {
			if (!retries--) {
				dev_err(mc->dev, "failed to flush %s DMA\n",
					rst->name);
				return -EBUSY;
			}

			usleep_range(10, 100);
		}
	}

	if (rst_ops->hotreset_assert) {
		/* clear clients DMA requests sitting before arbitration */
		err = rst_ops->hotreset_assert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}
/*
 * Deassert a memory-client hot reset: take the client out of hot reset
 * first, then unblock its DMA requests — the reverse order of assert.
 */
static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
				      unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->hotreset_deassert) {
		/* take out client from hot reset */
		err = rst_ops->hotreset_deassert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->unblock_dma) {
		/* allow new DMA requests to proceed to arbitration */
		err = rst_ops->unblock_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to unblock %s DMA : %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}
  250. static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
  251. unsigned long id)
  252. {
  253. struct tegra_mc *mc = reset_to_mc(rcdev);
  254. const struct tegra_mc_reset_ops *rst_ops;
  255. const struct tegra_mc_reset *rst;
  256. rst = tegra_mc_reset_find(mc, id);
  257. if (!rst)
  258. return -ENODEV;
  259. rst_ops = mc->soc->reset_ops;
  260. if (!rst_ops)
  261. return -ENODEV;
  262. return rst_ops->reset_status(mc, rst);
  263. }
/* Reset-framework entry points, backed by the hot-reset helpers above. */
static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};
  269. static int tegra_mc_reset_setup(struct tegra_mc *mc)
  270. {
  271. int err;
  272. mc->reset.ops = &tegra_mc_reset_ops;
  273. mc->reset.owner = THIS_MODULE;
  274. mc->reset.of_node = mc->dev->of_node;
  275. mc->reset.of_reset_n_cells = 1;
  276. mc->reset.nr_resets = mc->soc->num_resets;
  277. err = reset_controller_register(&mc->reset);
  278. if (err < 0)
  279. return err;
  280. return 0;
  281. }
  282. int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
  283. {
  284. unsigned int i;
  285. struct tegra_mc_timing *timing = NULL;
  286. for (i = 0; i < mc->num_timings; i++) {
  287. if (mc->timings[i].rate == rate) {
  288. timing = &mc->timings[i];
  289. break;
  290. }
  291. }
  292. if (!timing) {
  293. dev_err(mc->dev, "no memory timing registered for rate %lu\n",
  294. rate);
  295. return -EINVAL;
  296. }
  297. for (i = 0; i < mc->soc->num_emem_regs; ++i)
  298. mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
  299. return 0;
  300. }
  301. EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
  302. unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
  303. {
  304. u8 dram_count;
  305. dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
  306. dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
  307. dram_count++;
  308. return dram_count;
  309. }
  310. EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
  311. #if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
  312. defined(CONFIG_ARCH_TEGRA_114_SOC) || \
  313. defined(CONFIG_ARCH_TEGRA_124_SOC) || \
  314. defined(CONFIG_ARCH_TEGRA_132_SOC) || \
  315. defined(CONFIG_ARCH_TEGRA_210_SOC)
  316. static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
  317. {
  318. unsigned long long tick;
  319. unsigned int i;
  320. u32 value;
  321. /* compute the number of MC clock cycles per tick */
  322. tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
  323. do_div(tick, NSEC_PER_SEC);
  324. value = mc_readl(mc, MC_EMEM_ARB_CFG);
  325. value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
  326. value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
  327. mc_writel(mc, value, MC_EMEM_ARB_CFG);
  328. /* write latency allowance defaults */
  329. for (i = 0; i < mc->soc->num_clients; i++) {
  330. const struct tegra_mc_client *client = &mc->soc->clients[i];
  331. u32 value;
  332. value = mc_readl(mc, client->regs.la.reg);
  333. value &= ~(client->regs.la.mask << client->regs.la.shift);
  334. value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
  335. mc_writel(mc, value, client->regs.la.reg);
  336. }
  337. /* latch new values */
  338. mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
  339. return 0;
  340. }
/*
 * Parse one device-tree timing node: read its clock rate and the
 * "nvidia,emem-configuration" register values into @timing.
 */
static int load_one_timing(struct tegra_mc *mc,
			   struct tegra_mc_timing *timing,
			   struct device_node *node)
{
	int err;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;
	/* devm allocation: freed automatically when the MC device goes away */
	timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
					 sizeof(u32), GFP_KERNEL);
	if (!timing->emem_data)
		return -ENOMEM;

	err = of_property_read_u32_array(node, "nvidia,emem-configuration",
					 timing->emem_data,
					 mc->soc->num_emem_regs);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read EMEM configuration\n",
			node);
		return err;
	}

	return 0;
}
/*
 * Allocate mc->timings and populate it from the child nodes of @node,
 * one entry per child.
 */
static int load_timings(struct tegra_mc *mc, struct device_node *node)
{
	struct tegra_mc_timing *timing;
	int child_count = of_get_child_count(node);
	int i = 0, err;

	mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
				   GFP_KERNEL);
	if (!mc->timings)
		return -ENOMEM;

	mc->num_timings = child_count;

	for_each_child_of_node_scoped(node, child) {
		timing = &mc->timings[i++];

		err = load_one_timing(mc, timing, child);
		if (err)
			return err;
	}

	return 0;
}
/*
 * Load the memory timing tables matching this board's RAM code from the
 * device tree. A missing match only produces a warning; timings are
 * optional.
 */
static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
	u32 ram_code, node_ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	mc->num_timings = 0;

	/* pick the first child whose "nvidia,ram-code" matches the board */
	for_each_child_of_node_scoped(mc->dev->of_node, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err || (node_ram_code != ram_code))
			continue;

		err = load_timings(mc, node);
		if (err)
			return err;
		break;
	}

	if (mc->num_timings == 0)
		dev_warn(mc->dev,
			 "no memory timings for RAM code %u registered\n",
			 ram_code);

	return 0;
}
/*
 * Tegra30-era SoC probe: acquire the (optional) MC clock, disable debug
 * features, then program latency allowances and memory timings.
 */
int tegra30_mc_probe(struct tegra_mc *mc)
{
	int err;

	/* the clock is optional: mc->clk may legitimately be NULL */
	mc->clk = devm_clk_get_optional(mc->dev, "mc");
	if (IS_ERR(mc->clk)) {
		dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
		return PTR_ERR(mc->clk);
	}

	/* ensure that debug features are disabled */
	mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);

	err = tegra_mc_setup_latency_allowance(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
		return err;
	}

	err = tegra_mc_setup_timings(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup timings: %d\n", err);
		return err;
	}

	return 0;
}
/* Shared ops for the SoC generations covered by the surrounding #if block. */
const struct tegra_mc_ops tegra30_mc_ops = {
	.probe = tegra30_mc_probe,
	.handle_irq = tegra30_mc_handle_irq,
};
  435. #endif
  436. static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
  437. unsigned int *mc_channel)
  438. {
  439. if ((status & mc->soc->ch_intmask) == 0)
  440. return -EINVAL;
  441. *mc_channel = __ffs((status & mc->soc->ch_intmask) >>
  442. mc->soc->global_intstatus_channel_shift);
  443. return 0;
  444. }
  445. static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
  446. unsigned int channel)
  447. {
  448. return BIT(channel) << mc->soc->global_intstatus_channel_shift;
  449. }
/*
 * MC error interrupt handler: decode every pending error bit, log a
 * rate-limited description (client, direction, address, error type and,
 * for SMMU page faults, page permissions), then acknowledge the
 * interrupts. On multi-channel SoCs the faulting channel is determined
 * from MC_GLOBAL_INTSTATUS first.
 */
irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;
	unsigned int bit, channel;
	unsigned long status;

	if (mc->soc->num_channels) {
		u32 global_status;
		int err;

		global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);

		err = mc_global_intstatus_to_channel(mc, global_status, &channel);
		if (err < 0) {
			dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
					    global_status);
			return IRQ_NONE;
		}

		/* mask all interrupts to avoid flooding */
		status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
	} else {
		status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
	}

	if (!status)
		return IRQ_NONE;

	for_each_set_bit(bit, &status, 32) {
		const char *error = tegra_mc_status_names[bit] ?: "unknown";
		const char *client = "unknown", *desc;
		const char *direction, *secure;
		u32 status_reg, addr_reg;
		u32 intmask = BIT(bit);
		phys_addr_t addr = 0;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		u32 addr_hi_reg = 0;
#endif
		unsigned int i;
		char perm[7];
		u8 id, type;
		u32 value;

		/* each interrupt type reports through its own status/address registers */
		switch (intmask) {
		case MC_INT_DECERR_VPR:
			status_reg = MC_ERR_VPR_STATUS;
			addr_reg = MC_ERR_VPR_ADR;
			break;

		case MC_INT_SECERR_SEC:
			status_reg = MC_ERR_SEC_STATUS;
			addr_reg = MC_ERR_SEC_ADR;
			break;

		case MC_INT_DECERR_MTS:
			status_reg = MC_ERR_MTS_STATUS;
			addr_reg = MC_ERR_MTS_ADR;
			break;

		case MC_INT_DECERR_GENERALIZED_CARVEOUT:
			status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
			addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
			break;

		case MC_INT_DECERR_ROUTE_SANITY:
			status_reg = MC_ERR_ROUTE_SANITY_STATUS;
			addr_reg = MC_ERR_ROUTE_SANITY_ADR;
			break;

		default:
			status_reg = MC_ERR_STATUS;
			addr_reg = MC_ERR_ADR;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
			if (mc->soc->has_addr_hi_reg)
				addr_hi_reg = MC_ERR_ADR_HI;
#endif
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, status_reg);
		else
			value = mc_readl(mc, status_reg);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (mc->soc->num_address_bits > 32) {
			/*
			 * High address bits come either from a dedicated
			 * register or from a field inside the status word.
			 */
			if (addr_hi_reg) {
				if (mc->soc->num_channels)
					addr = mc_ch_readl(mc, channel, addr_hi_reg);
				else
					addr = mc_readl(mc, addr_hi_reg);
			} else {
				addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
					MC_ERR_STATUS_ADR_HI_MASK);
			}
			addr <<= 32;
		}
#endif

		if (value & MC_ERR_STATUS_RW)
			direction = "write";
		else
			direction = "read";

		if (value & MC_ERR_STATUS_SECURITY)
			secure = "secure ";
		else
			secure = "";

		/* map the client ID in the status word to a readable name */
		id = value & mc->soc->client_id_mask;

		for (i = 0; i < mc->soc->num_clients; i++) {
			if (mc->soc->clients[i].id == id) {
				client = mc->soc->clients[i].name;
				break;
			}
		}

		type = (value & MC_ERR_STATUS_TYPE_MASK) >>
		       MC_ERR_STATUS_TYPE_SHIFT;
		desc = tegra_mc_error_names[type];

		switch (value & MC_ERR_STATUS_TYPE_MASK) {
		case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
			/* render SMMU page permissions as e.g. " [RW-]" */
			perm[0] = ' ';
			perm[1] = '[';

			if (value & MC_ERR_STATUS_READABLE)
				perm[2] = 'R';
			else
				perm[2] = '-';

			if (value & MC_ERR_STATUS_WRITABLE)
				perm[3] = 'W';
			else
				perm[3] = '-';

			if (value & MC_ERR_STATUS_NONSECURE)
				perm[4] = '-';
			else
				perm[4] = 'S';

			perm[5] = ']';
			perm[6] = '\0';
			break;

		default:
			/* no permission string for other error types */
			perm[0] = '\0';
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, addr_reg);
		else
			value = mc_readl(mc, addr_reg);
		addr |= value;

		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
				    client, secure, direction, &addr, error,
				    desc, perm);
	}

	/* clear interrupts */
	if (mc->soc->num_channels) {
		mc_ch_writel(mc, channel, status, MC_INTSTATUS);
		mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
			     mc_channel_to_global_intstatus(mc, channel),
			     MC_GLOBAL_INTSTATUS);
	} else {
		mc_writel(mc, status, MC_INTSTATUS);
	}

	return IRQ_HANDLED;
}
/*
 * Human-readable names for interrupt status bits, indexed by bit number
 * (see the handler's for_each_set_bit loop). Unlisted bits are reported
 * as "unknown".
 */
const char *const tegra_mc_status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 7] = "GART page fault",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
	[17] = "Generalized carveout violation",
	[20] = "Route Sanity error",
};
/*
 * Descriptions indexed by the TYPE field extracted from the error status
 * register (MC_ERR_STATUS_TYPE_MASK/SHIFT in the interrupt handler).
 */
const char *const tegra_mc_error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};
/*
 * Translate an interconnect DT specifier to one of this provider's ICC
 * nodes, matching on the node ID carried in args[0].
 */
struct icc_node *tegra_mc_icc_xlate(const struct of_phandle_args *spec, void *data)
{
	struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
	struct icc_node *node;

	list_for_each_entry(node, &mc->provider.nodes, node_list) {
		if (node->id == spec->args[0])
			return node;
	}

	/*
	 * If a client driver calls devm_of_icc_get() before the MC driver
	 * is probed, then return EPROBE_DEFER to the client driver.
	 */
	return ERR_PTR(-EPROBE_DEFER);
}
  629. static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
  630. {
  631. *average = 0;
  632. *peak = 0;
  633. return 0;
  634. }
/* No-op set: bandwidth requests are accepted but not acted upon. */
static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	return 0;
}
/* Default ICC ops: standard aggregation with no-op bandwidth handling. */
const struct tegra_mc_icc_ops tegra_mc_icc_ops = {
	.xlate = tegra_mc_icc_xlate,
	.aggregate = icc_std_aggregate,
	.get_bw = tegra_mc_icc_get,
	.set = tegra_mc_icc_set,
};
  645. /*
  646. * Memory Controller (MC) has few Memory Clients that are issuing memory
  647. * bandwidth allocation requests to the MC interconnect provider. The MC
  648. * provider aggregates the requests and then sends the aggregated request
  649. * up to the External Memory Controller (EMC) interconnect provider which
  650. * re-configures hardware interface to External Memory (EMEM) in accordance
  651. * to the required bandwidth. Each MC interconnect node represents an
  652. * individual Memory Client.
  653. *
  654. * Memory interconnect topology:
  655. *
  656. * +----+
  657. * +--------+ | |
  658. * | TEXSRD +--->+ |
  659. * +--------+ | |
  660. * | | +-----+ +------+
  661. * ... | MC +--->+ EMC +--->+ EMEM |
  662. * | | +-----+ +------+
  663. * +--------+ | |
  664. * | DISP.. +--->+ |
  665. * +--------+ | |
  666. * +----+
  667. */
/*
 * Register the MC as an interconnect provider: one node for the MC itself
 * (linked to the EMC) plus one node per memory client (linked to the MC).
 * All nodes created so far are removed on any failure.
 */
static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
{
	struct icc_node *node;
	unsigned int i;
	int err;

	/* older device-trees don't have interconnect properties */
	if (!device_property_present(mc->dev, "#interconnect-cells") ||
	    !mc->soc->icc_ops)
		return 0;

	mc->provider.dev = mc->dev;
	mc->provider.data = &mc->provider;
	mc->provider.set = mc->soc->icc_ops->set;
	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
	mc->provider.get_bw = mc->soc->icc_ops->get_bw;
	mc->provider.xlate = mc->soc->icc_ops->xlate;
	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;

	icc_provider_init(&mc->provider);

	/* create Memory Controller node */
	node = icc_node_create(TEGRA_ICC_MC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "Memory Controller";
	icc_node_add(node, &mc->provider);

	/* link Memory Controller to External Memory Controller */
	err = icc_link_create(node, TEGRA_ICC_EMC);
	if (err)
		goto remove_nodes;

	for (i = 0; i < mc->soc->num_clients; i++) {
		/* create MC client node */
		node = icc_node_create(mc->soc->clients[i].id);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto remove_nodes;
		}

		node->name = mc->soc->clients[i].name;
		icc_node_add(node, &mc->provider);

		/* link Memory Client to Memory Controller */
		err = icc_link_create(node, TEGRA_ICC_MC);
		if (err)
			goto remove_nodes;

		/* cast away const: the ICC core's node->data is non-const */
		node->data = (struct tegra_mc_client *)&(mc->soc->clients[i]);
	}

	err = icc_provider_register(&mc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	/* unwind every node created above */
	icc_nodes_remove(&mc->provider);

	return err;
}
  718. static void tegra_mc_num_channel_enabled(struct tegra_mc *mc)
  719. {
  720. unsigned int i;
  721. u32 value;
  722. value = mc_ch_readl(mc, 0, MC_EMEM_ADR_CFG_CHANNEL_ENABLE);
  723. if (value <= 0) {
  724. mc->num_channels = mc->soc->num_channels;
  725. return;
  726. }
  727. for (i = 0; i < 32; i++) {
  728. if (value & BIT(i))
  729. mc->num_channels++;
  730. }
  731. }
/*
 * Main driver probe: map registers, run the SoC-specific probe hook,
 * wire up the error interrupt, and register the optional reset-controller,
 * interconnect-provider and SMMU sub-functions. Reset/interconnect/SMMU
 * registration failures are logged but do not fail the probe.
 */
static int tegra_mc_probe(struct platform_device *pdev)
{
	struct tegra_mc *mc;
	u64 mask;
	int err;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mc);
	spin_lock_init(&mc->lock);
	/* SoC description selected via the of_match table */
	mc->soc = of_device_get_match_data(&pdev->dev);
	mc->dev = &pdev->dev;

	mask = DMA_BIT_MASK(mc->soc->num_address_bits);

	err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	/* length of MC tick in nanoseconds */
	mc->tick = 30;

	mc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mc->regs))
		return PTR_ERR(mc->regs);

	mc->debugfs.root = debugfs_create_dir("mc", NULL);

	/* run the SoC-specific probe hook, if any */
	if (mc->soc->ops && mc->soc->ops->probe) {
		err = mc->soc->ops->probe(mc);
		if (err < 0)
			return err;
	}

	tegra_mc_num_channel_enabled(mc);

	if (mc->soc->ops && mc->soc->ops->handle_irq) {
		mc->irq = platform_get_irq(pdev, 0);
		if (mc->irq < 0)
			return mc->irq;

		WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");

		/* unmask the supported interrupts before requesting the IRQ */
		if (mc->soc->num_channels)
			mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
				     MC_INTMASK);
		else
			mc_writel(mc, mc->soc->intmask, MC_INTMASK);

		err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
				       dev_name(&pdev->dev), mc);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
				err);
			return err;
		}
	}

	/* non-fatal: MC still works without the reset controller */
	if (mc->soc->reset_ops) {
		err = tegra_mc_reset_setup(mc);
		if (err < 0)
			dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
	}

	/* non-fatal: MC still works without the interconnect provider */
	err = tegra_mc_interconnect_setup(mc);
	if (err < 0)
		dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
			err);

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu)) {
			/* tolerated: continue without the SMMU */
			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
				PTR_ERR(mc->smmu));
			mc->smmu = NULL;
		}
	}

	return 0;
}
  799. static void tegra_mc_sync_state(struct device *dev)
  800. {
  801. struct tegra_mc *mc = dev_get_drvdata(dev);
  802. /* check whether ICC provider is registered */
  803. if (mc->provider.dev == dev)
  804. icc_sync_state(dev);
  805. }
static struct platform_driver tegra_mc_driver = {
	.driver = {
		.name = "tegra-mc",
		.of_match_table = tegra_mc_of_match,
		/* disallow manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
		.sync_state = tegra_mc_sync_state,
	},
	.prevent_deferred_probe = true,
	.probe = tegra_mc_probe,
};
static int tegra_mc_init(void)
{
	return platform_driver_register(&tegra_mc_driver);
}
/* registered at arch_initcall time, earlier than regular device initcalls */
arch_initcall(tegra_mc_init);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");