msm_iommu.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}
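
/*
 * The IOMMU register file is only accessible while its clocks are running,
 * so every register sequence in this driver is bracketed by
 * __enable_clocks()/__disable_clocks(). The interface clock ("smmu_pclk")
 * is always enabled; the core clock ("iommu_clk") is enabled on top of it
 * when present.
 */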
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}
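
/*
 * Put the global registers and every context bank into a known disabled
 * state before any context is programmed.
 */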
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}
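
/*
 * TLB maintenance callbacks. They are wired into msm_iommu_flush_ops and
 * invoked by the io-pgtable code after page table updates, walking every
 * IOMMU instance attached to the domain.
 */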
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};
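
/* Context bank allocator: one bit per context bank in 'map'. */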
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}
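
/*
 * Program a context bank with the ARMv7 short-descriptor tables built by
 * io-pgtable (TTBR0/TTBCR/PRRR/NMRR), enable context fault reporting and
 * finally turn translation on for that context.
 */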
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
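
/*
 * Allocate the ARM_V7S io-pgtable for this domain. The resulting
 * arm_v7s_cfg values are what __program_context() later writes into the
 * hardware context bank.
 */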
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}
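
/*
 * Attach: for every IOMMU instance serving this device, allocate a free
 * context bank, route the device's MIDs to it and program it with the
 * domain's page table.
 */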
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
				     struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	if (domain == identity_domain || !domain)
		return 0;

	priv = to_msm_priv(domain);
	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static struct iommu_domain_ops msm_iommu_identity_ops = {
	.attach_dev = msm_iommu_identity_attach,
};

static struct iommu_domain msm_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &msm_iommu_identity_ops,
};

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
				   GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			      size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);

	return 0;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	size_t ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}
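
/*
 * Resolve an IOVA by asking the hardware: write the VA to the V2P request
 * register of the first attached context and read the physical address
 * back from PAR, instead of walking the page table in software.
 */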
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}
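
/* Dump the fault-related registers of a context bank. */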
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR = %08x PAR = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");
	pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0 = %08x TTBR1 = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR = %08x ACTLR = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}
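
/*
 * of_xlate support: remember which stream IDs (MIDs) were passed in the
 * phandle arguments for a master device, so config_mids() can route them
 * to the context bank picked at attach time.
 */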
static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];
	return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
			       const struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}
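
/*
 * Context fault handler (threaded IRQ): scan all context banks, dump the
 * registers of any faulting context and clear its FSR.
 */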
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.identity_domain = &msm_iommu_identity_domain,
	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.map_pages = msm_iommu_map,
		.unmap_pages = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};
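
/*
 * Probe: acquire clocks and MMIO, reset the hardware, sanity-check that a
 * V2P translation produces a non-zero PAR, then register the instance
 * with the IOMMU core.
 */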
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);