/* fsl_pamu_domain.c - Freescale PAMU (Peripheral Access Management Unit) IOMMU domain driver */
  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * You should have received a copy of the GNU General Public License
  12. * along with this program; if not, write to the Free Software
  13. * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  14. *
  15. * Copyright (C) 2013 Freescale Semiconductor, Inc.
  16. * Author: Varun Sethi <varun.sethi@freescale.com>
  17. *
  18. */
  19. #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
  20. #include "fsl_pamu_domain.h"
  21. #include <sysdev/fsl_pci.h>
  22. /*
  23. * Global spinlock that needs to be held while
  24. * configuring PAMU.
  25. */
  26. static DEFINE_SPINLOCK(iommu_lock);
  27. static struct kmem_cache *fsl_pamu_domain_cache;
  28. static struct kmem_cache *iommu_devinfo_cache;
  29. static DEFINE_SPINLOCK(device_domain_lock);
  30. struct iommu_device pamu_iommu; /* IOMMU core code handle */
/* Convert a generic iommu_domain to the fsl_dma_domain it is embedded in. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
  35. static int __init iommu_init_mempool(void)
  36. {
  37. fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
  38. sizeof(struct fsl_dma_domain),
  39. 0,
  40. SLAB_HWCACHE_ALIGN,
  41. NULL);
  42. if (!fsl_pamu_domain_cache) {
  43. pr_debug("Couldn't create fsl iommu_domain cache\n");
  44. return -ENOMEM;
  45. }
  46. iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
  47. sizeof(struct device_domain_info),
  48. 0,
  49. SLAB_HWCACHE_ALIGN,
  50. NULL);
  51. if (!iommu_devinfo_cache) {
  52. pr_debug("Couldn't create devinfo cache\n");
  53. kmem_cache_destroy(fsl_pamu_domain_cache);
  54. return -ENOMEM;
  55. }
  56. return 0;
  57. }
/*
 * Translate @iova to a physical address using the domain's window(s).
 *
 * With more than one window, the geometry is split into win_cnt equal
 * power-of-two subwindows and the one covering @iova is selected by its
 * offset from the aperture start. Returns 0 when windows/geometry are
 * not configured or the covering window is not valid.
 */
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		/* Each subwindow spans geom_size / win_cnt bytes. */
		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		/* Round iova down to the base of its subwindow ... */
		subwin_iova = iova & ~(subwin_size - 1);
		/* ... then index from the aperture start. */
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
  81. static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
  82. {
  83. struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
  84. int i, ret;
  85. unsigned long rpn, flags;
  86. for (i = 0; i < dma_domain->win_cnt; i++) {
  87. if (sub_win_ptr[i].valid) {
  88. rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
  89. spin_lock_irqsave(&iommu_lock, flags);
  90. ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
  91. sub_win_ptr[i].size,
  92. ~(u32)0,
  93. rpn,
  94. dma_domain->snoop_id,
  95. dma_domain->stash_id,
  96. (i > 0) ? 1 : 0,
  97. sub_win_ptr[i].prot);
  98. spin_unlock_irqrestore(&iommu_lock, flags);
  99. if (ret) {
  100. pr_debug("SPAACE configuration failed for liodn %d\n",
  101. liodn);
  102. return ret;
  103. }
  104. }
  105. }
  106. return ret;
  107. }
/*
 * Program the single PPAACE window for @liodn from the domain's first
 * dma_window, based at the geometry aperture start.
 * Returns the pamu_config_ppaace() result (0 on success).
 */
static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}
  126. /* Map the DMA window corresponding to the LIODN */
  127. static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
  128. {
  129. if (dma_domain->win_cnt > 1)
  130. return map_subwins(liodn, dma_domain);
  131. else
  132. return map_win(liodn, dma_domain);
  133. }
/*
 * Update window/subwindow mapping for the LIODN.
 *
 * Reprograms window @wnd_nr for @liodn: a SPAACE entry when the domain
 * has multiple subwindows, the single PPAACE entry otherwise. The
 * global iommu_lock is held across the whole reconfiguration.
 */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		/* Single window: always based at the aperture start. */
		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}
	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}
/*
 * Write the stash destination @val into every PAACE/SPAACE entry of
 * @liodn. Fails with -EINVAL if the domain's windows have not been
 * configured yet; otherwise stops at the first update error.
 */
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	/* One entry per (sub)window. */
	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}
/* Set the geometry parameters for a LIODN */
/*
 * Program the primary PAACE entry (and, for win_cnt > 1, one disabled
 * SPAACE entry per subwindow) for @liodn from the domain geometry.
 * Each hardware update is done with the LIODN/subwindow disabled first,
 * under the global iommu_lock. Returns 0 or the first PAMU error.
 */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		/* Subwindows split the geometry into equal power-of-two parts. */
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}
  245. static int check_size(u64 size, dma_addr_t iova)
  246. {
  247. /*
  248. * Size must be a power of two and at least be equal
  249. * to PAMU page size.
  250. */
  251. if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
  252. pr_debug("Size too small or not a power of two\n");
  253. return -EINVAL;
  254. }
  255. /* iova must be page size aligned */
  256. if (iova & (size - 1)) {
  257. pr_debug("Address is not aligned with window size\n");
  258. return -EINVAL;
  259. }
  260. return 0;
  261. }
  262. static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
  263. {
  264. struct fsl_dma_domain *domain;
  265. domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
  266. if (!domain)
  267. return NULL;
  268. domain->stash_id = ~(u32)0;
  269. domain->snoop_id = ~(u32)0;
  270. domain->win_cnt = pamu_get_max_subwin_cnt();
  271. domain->geom_size = 0;
  272. INIT_LIST_HEAD(&domain->devices);
  273. spin_lock_init(&domain->domain_lock);
  274. return domain;
  275. }
/*
 * Drop one device's reference on its domain: unlink the info record,
 * free subwindows (if any) and disable the LIODN in the PAMU, then
 * clear the archdata back-pointer and free the record.
 * Called with the owning domain's domain_lock held (see detach_device).
 */
static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);

	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);

	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
/*
 * Remove @dev from @dma_domain's device list. Passing a NULL @dev
 * removes every device attached to the domain.
 */
static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
  302. static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
  303. {
  304. struct device_domain_info *info, *old_domain_info;
  305. unsigned long flags;
  306. spin_lock_irqsave(&device_domain_lock, flags);
  307. /*
  308. * Check here if the device is already attached to domain or not.
  309. * If the device is already attached to a domain detach it.
  310. */
  311. old_domain_info = dev->archdata.iommu_domain;
  312. if (old_domain_info && old_domain_info->domain != dma_domain) {
  313. spin_unlock_irqrestore(&device_domain_lock, flags);
  314. detach_device(dev, old_domain_info->domain);
  315. spin_lock_irqsave(&device_domain_lock, flags);
  316. }
  317. info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
  318. info->dev = dev;
  319. info->liodn = liodn;
  320. info->domain = dma_domain;
  321. list_add(&info->link, &dma_domain->devices);
  322. /*
  323. * In case of devices with multiple LIODNs just store
  324. * the info for the first LIODN as all
  325. * LIODNs share the same domain
  326. */
  327. if (!dev->archdata.iommu_domain)
  328. dev->archdata.iommu_domain = info;
  329. spin_unlock_irqrestore(&device_domain_lock, flags);
  330. }
  331. static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
  332. dma_addr_t iova)
  333. {
  334. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  335. if (iova < domain->geometry.aperture_start ||
  336. iova > domain->geometry.aperture_end)
  337. return 0;
  338. return get_phys_addr(dma_domain, iova);
  339. }
  340. static bool fsl_pamu_capable(enum iommu_cap cap)
  341. {
  342. return cap == IOMMU_CAP_CACHE_COHERENCY;
  343. }
/*
 * iommu_ops->domain_free: detach every device, clear the enable/mapped
 * state, and return the domain object to its slab cache.
 */
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
/*
 * iommu_ops->domain_alloc: only IOMMU_DOMAIN_UNMANAGED is supported.
 * Returns the embedded iommu_domain, or NULL on failure/bad type.
 */
static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return NULL;
	}
	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}
  369. /* Configure geometry settings for all LIODNs associated with domain */
  370. static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
  371. struct iommu_domain_geometry *geom_attr,
  372. u32 win_cnt)
  373. {
  374. struct device_domain_info *info;
  375. int ret = 0;
  376. list_for_each_entry(info, &dma_domain->devices, link) {
  377. ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
  378. geom_attr, win_cnt);
  379. if (ret)
  380. break;
  381. }
  382. return ret;
  383. }
  384. /* Update stash destination for all LIODNs associated with the domain */
  385. static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
  386. {
  387. struct device_domain_info *info;
  388. int ret = 0;
  389. list_for_each_entry(info, &dma_domain->devices, link) {
  390. ret = update_liodn_stash(info->liodn, dma_domain, val);
  391. if (ret)
  392. break;
  393. }
  394. return ret;
  395. }
  396. /* Update domain mappings for all LIODNs associated with the domain */
  397. static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
  398. {
  399. struct device_domain_info *info;
  400. int ret = 0;
  401. list_for_each_entry(info, &dma_domain->devices, link) {
  402. ret = update_liodn(info->liodn, dma_domain, wnd_nr);
  403. if (ret)
  404. break;
  405. }
  406. return ret;
  407. }
/*
 * Disable window @wnd_nr for every device in the domain.
 *
 * For a single-window domain that is enabled, the whole LIODN is
 * disabled (and the domain marked disabled); otherwise only the
 * subwindow's SPAACE entry is disabled. Returns the last disable
 * result (0 for an empty device list).
 */
static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}
/*
 * iommu_ops->domain_window_disable: invalidate window @wnd_nr.
 * Silently returns if windows are not configured or the index is out
 * of range; on successful hardware disable the window is marked
 * invalid and the domain's mapped count is decremented.
 */
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
  448. static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
  449. phys_addr_t paddr, u64 size, int prot)
  450. {
  451. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  452. struct dma_window *wnd;
  453. int pamu_prot = 0;
  454. int ret;
  455. unsigned long flags;
  456. u64 win_size;
  457. if (prot & IOMMU_READ)
  458. pamu_prot |= PAACE_AP_PERMS_QUERY;
  459. if (prot & IOMMU_WRITE)
  460. pamu_prot |= PAACE_AP_PERMS_UPDATE;
  461. spin_lock_irqsave(&dma_domain->domain_lock, flags);
  462. if (!dma_domain->win_arr) {
  463. pr_debug("Number of windows not configured\n");
  464. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  465. return -ENODEV;
  466. }
  467. if (wnd_nr >= dma_domain->win_cnt) {
  468. pr_debug("Invalid window index\n");
  469. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  470. return -EINVAL;
  471. }
  472. win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
  473. if (size > win_size) {
  474. pr_debug("Invalid window size\n");
  475. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  476. return -EINVAL;
  477. }
  478. if (dma_domain->win_cnt == 1) {
  479. if (dma_domain->enabled) {
  480. pr_debug("Disable the window before updating the mapping\n");
  481. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  482. return -EBUSY;
  483. }
  484. ret = check_size(size, domain->geometry.aperture_start);
  485. if (ret) {
  486. pr_debug("Aperture start not aligned to the size\n");
  487. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  488. return -EINVAL;
  489. }
  490. }
  491. wnd = &dma_domain->win_arr[wnd_nr];
  492. if (!wnd->valid) {
  493. wnd->paddr = paddr;
  494. wnd->size = size;
  495. wnd->prot = pamu_prot;
  496. ret = update_domain_mapping(dma_domain, wnd_nr);
  497. if (!ret) {
  498. wnd->valid = 1;
  499. dma_domain->mapped++;
  500. }
  501. } else {
  502. pr_debug("Disable the window before updating the mapping\n");
  503. ret = -EBUSY;
  504. }
  505. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  506. return ret;
  507. }
/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
/*
 * For each of the @num LIODNs: validate the value, attach the device,
 * and — if the domain geometry is already configured — program the
 * LIODN and any existing window mappings. Stops at the first failure.
 * Runs entirely under the domain's domain_lock.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
				 liodn[i], dev->of_node);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			/* win_cnt of 0 tells the PAMU "no subwindows". */
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
/*
 * iommu_ops->attach_dev: look up the device's "fsl,liodn" property and
 * attach every listed LIODN to the domain. For PCI devices the LIODNs
 * of the hosting PCI controller are used instead (programmed by u-boot).
 */
static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * iommu_ops->detach_dev: mirror of fsl_pamu_attach_device(). Resolves
 * PCI devices to their controller node, then detaches the device if it
 * carries a "fsl,liodn" property.
 */
static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
}
/*
 * DOMAIN_ATTR_GEOMETRY handler: validate and record the aperture.
 * The aperture size must be a power of two with an aligned start
 * (check_size) and force_aperture must be set — DMA outside the
 * geometry is not supported. Fails with -EBUSY while the domain is
 * enabled.
 */
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
/* Set the domain stash attribute */
/*
 * Record the stash attributes, resolve them to a stash id via
 * get_stash_id() (~0 means invalid → -EINVAL), then push the new id to
 * every attached LIODN.
 */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
/* Configure domain dma state i.e. enable/disable DMA */
/*
 * Enabling requires at least one valid mapping (-ENODEV otherwise).
 * NOTE: per-LIODN enable/disable failures are only logged; the
 * function still returns 0 once the state flag has been updated.
 */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}
  688. static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
  689. enum iommu_attr attr_type, void *data)
  690. {
  691. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  692. int ret = 0;
  693. switch (attr_type) {
  694. case DOMAIN_ATTR_GEOMETRY:
  695. ret = configure_domain_geometry(domain, data);
  696. break;
  697. case DOMAIN_ATTR_FSL_PAMU_STASH:
  698. ret = configure_domain_stash(dma_domain, data);
  699. break;
  700. case DOMAIN_ATTR_FSL_PAMU_ENABLE:
  701. ret = configure_domain_dma_state(dma_domain, *(int *)data);
  702. break;
  703. default:
  704. pr_debug("Unsupported attribute type\n");
  705. ret = -EINVAL;
  706. break;
  707. }
  708. return ret;
  709. }
  710. static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
  711. enum iommu_attr attr_type, void *data)
  712. {
  713. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  714. int ret = 0;
  715. switch (attr_type) {
  716. case DOMAIN_ATTR_FSL_PAMU_STASH:
  717. memcpy(data, &dma_domain->dma_stash,
  718. sizeof(struct pamu_stash_attribute));
  719. break;
  720. case DOMAIN_ATTR_FSL_PAMU_ENABLE:
  721. *(int *)data = dma_domain->enabled;
  722. break;
  723. case DOMAIN_ATTR_FSL_PAMUV1:
  724. *(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
  725. break;
  726. default:
  727. pr_debug("Unsupported attribute type\n");
  728. ret = -EINVAL;
  729. break;
  730. }
  731. return ret;
  732. }
  733. static struct iommu_group *get_device_iommu_group(struct device *dev)
  734. {
  735. struct iommu_group *group;
  736. group = iommu_group_get(dev);
  737. if (!group)
  738. group = iommu_group_alloc();
  739. return group;
  740. }
/*
 * Return true when the PCI controller supports endpoint partitioning,
 * i.e. its BRR1 version field is >= 0x204.
 */
static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}
  750. /* Get iommu group information from peer devices or devices on the parent bus */
  751. static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
  752. {
  753. struct pci_dev *tmp;
  754. struct iommu_group *group;
  755. struct pci_bus *bus = pdev->bus;
  756. /*
  757. * Traverese the pci bus device list to get
  758. * the shared iommu group.
  759. */
  760. while (bus) {
  761. list_for_each_entry(tmp, &bus->devices, bus_list) {
  762. if (tmp == pdev)
  763. continue;
  764. group = iommu_group_get(&tmp->dev);
  765. if (group)
  766. return group;
  767. }
  768. bus = bus->parent;
  769. }
  770. return NULL;
  771. }
/*
 * Choose the iommu_group for a PCI device.
 *
 * Controllers that support endpoint partitioning get per-device groups
 * (and the controller's own group is dropped). Otherwise all devices
 * behind the controller share one group, seeded from the controller
 * node for the first device and from siblings thereafter.
 * Returns ERR_PTR(-ENODEV) when no group can be determined.
 */
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * PCIe controller is not a paritionable entity
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controllers device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}
  810. static struct iommu_group *fsl_pamu_device_group(struct device *dev)
  811. {
  812. struct iommu_group *group = ERR_PTR(-ENODEV);
  813. int len;
  814. /*
  815. * For platform devices we allocate a separate group for
  816. * each of the devices.
  817. */
  818. if (dev_is_pci(dev))
  819. group = get_pci_device_group(to_pci_dev(dev));
  820. else if (of_get_property(dev->of_node, "fsl,liodn", &len))
  821. group = get_device_iommu_group(dev);
  822. return group;
  823. }
  824. static int fsl_pamu_add_device(struct device *dev)
  825. {
  826. struct iommu_group *group;
  827. group = iommu_group_get_for_dev(dev);
  828. if (IS_ERR(group))
  829. return PTR_ERR(group);
  830. iommu_group_put(group);
  831. iommu_device_link(&pamu_iommu, dev);
  832. return 0;
  833. }
/* iommu_ops->remove_device: undo fsl_pamu_add_device (sysfs link + group). */
static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_device_unlink(&pamu_iommu, dev);
	iommu_group_remove_device(dev);
}
  839. static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
  840. {
  841. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  842. unsigned long flags;
  843. int ret;
  844. spin_lock_irqsave(&dma_domain->domain_lock, flags);
  845. /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
  846. if (dma_domain->enabled) {
  847. pr_debug("Can't set geometry attributes as domain is active\n");
  848. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  849. return -EBUSY;
  850. }
  851. /* Ensure that the geometry has been set for the domain */
  852. if (!dma_domain->geom_size) {
  853. pr_debug("Please configure geometry before setting the number of windows\n");
  854. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  855. return -EINVAL;
  856. }
  857. /*
  858. * Ensure we have valid window count i.e. it should be less than
  859. * maximum permissible limit and should be a power of two.
  860. */
  861. if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
  862. pr_debug("Invalid window count\n");
  863. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  864. return -EINVAL;
  865. }
  866. ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
  867. w_count > 1 ? w_count : 0);
  868. if (!ret) {
  869. kfree(dma_domain->win_arr);
  870. dma_domain->win_arr = kcalloc(w_count,
  871. sizeof(*dma_domain->win_arr),
  872. GFP_ATOMIC);
  873. if (!dma_domain->win_arr) {
  874. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  875. return -ENOMEM;
  876. }
  877. dma_domain->win_cnt = w_count;
  878. }
  879. spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
  880. return ret;
  881. }
  882. static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
  883. {
  884. struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
  885. return dma_domain->win_cnt;
  886. }
/* PAMU iommu_ops: domain lifetime, window management and device grouping. */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	/* domain lifecycle */
	.domain_alloc	= fsl_pamu_domain_alloc,
	.domain_free	= fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	/* DMA window (subwindow) management */
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys	= fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	/* per-device hooks */
	.add_device	= fsl_pamu_add_device,
	.remove_device	= fsl_pamu_remove_device,
	.device_group   = fsl_pamu_device_group,
};
  904. int __init pamu_domain_init(void)
  905. {
  906. int ret = 0;
  907. ret = iommu_init_mempool();
  908. if (ret)
  909. return ret;
  910. ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
  911. if (ret)
  912. return ret;
  913. iommu_device_set_ops(&pamu_iommu, &fsl_pamu_ops);
  914. ret = iommu_device_register(&pamu_iommu);
  915. if (ret) {
  916. iommu_device_sysfs_remove(&pamu_iommu);
  917. pr_err("Can't register iommu device\n");
  918. return ret;
  919. }
  920. bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
  921. bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
  922. return ret;
  923. }