io-pgtable-arm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
/* Struct accessors */
#define io_pgtable_to_data(x) \
        container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x) \
        io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d) \
        (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
         ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d) \
        (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d) \
        (sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d) \
        (ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d) \
        ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d) \
        (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
         ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
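
/*
 * Worked example (illustrative; the values follow directly from the macros
 * above): with a 4KiB granule and 8-byte PTEs, bits_per_level = 12 - 3 = 9,
 * so ARM_LPAE_LVL_SHIFT(3,d) = 12, (2,d) = 21, (1,d) = 30 and (0,d) = 39,
 * giving block/page sizes of 4KiB, 2MiB, 1GiB and 512GiB respectively.
 * ARM_LPAE_LVL_IDX() then extracts the 9-bit table index at each level,
 * widened at the start level if the PGD is larger than one granule.
 */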
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK | \
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
					 ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte) \
        (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_writeable_dirty(pte) \
        (((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)

#define iopte_set_writeable_clean(ptep) \
        set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
struct arm_lpae_io_pgtable {
        struct io_pgtable	iop;

        int			pgd_bits;
        int			start_level;
        int			bits_per_level;

        void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
                              enum io_pgtable_fmt fmt)
{
        if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
                return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

        return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
        if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
                return false;

        return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
                                     struct arm_lpae_io_pgtable *data)
{
        arm_lpae_iopte pte = paddr;

        /* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
        return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
                                  struct arm_lpae_io_pgtable *data)
{
        u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

        if (ARM_LPAE_GRANULE(data) < SZ_64K)
                return paddr;

        /* Rotate the packed high-order bits back to the top */
        return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
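
/*
 * Note on the packing above (illustrative): 52-bit output addresses are only
 * available with the 64KiB granule, where PA bits 51:48 are carried in PTE
 * bits 15:12 rather than in a wider address field. With smaller granules the
 * OAS is at most 48 bits, so PA[51:48] is zero and the fold in
 * paddr_to_iopte() is a no-op; with 64KiB pages the PA is 64KiB-aligned, so
 * bits 15:12 are free to hold PA[51:48] and iopte_to_paddr() rotates them
 * back up.
 */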
/*
 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 * a concatenated PGD, into the maximum number of entries that can be
 * mapped in the same table page.
 */
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
        int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

        return ptes_per_table - (i & (ptes_per_table - 1));
}
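
/*
 * Illustrative example: with 512 PTEs per table page, an index i == 1000
 * into a concatenated PGD gives 512 - (1000 & 511) = 24 remaining entries,
 * i.e. a mapping loop must stop at the table-page boundary even though the
 * concatenated PGD itself is contiguous.
 */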
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
        return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
                                    struct io_pgtable_cfg *cfg,
                                    void *cookie)
{
        struct device *dev = cfg->iommu_dev;
        int order = get_order(size);
        dma_addr_t dma;
        void *pages;

        VM_BUG_ON((gfp & __GFP_HIGHMEM));

        if (cfg->alloc)
                pages = cfg->alloc(cookie, size, gfp);
        else
                pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);

        if (!pages)
                return NULL;

        if (!cfg->coherent_walk) {
                dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
                        goto out_free;
                /*
                 * We depend on the IOMMU being able to work with any physical
                 * address directly, so if the DMA layer suggests otherwise by
                 * translating or truncating them, that bodes very badly...
                 */
                if (dma != virt_to_phys(pages))
                        goto out_unmap;
        }

        return pages;

out_unmap:
        dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
        dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
        if (cfg->free)
                cfg->free(cookie, pages, size);
        else
                iommu_free_pages(pages, order);

        return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
                                  struct io_pgtable_cfg *cfg,
                                  void *cookie)
{
        if (!cfg->coherent_walk)
                dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
                                 size, DMA_TO_DEVICE);

        if (cfg->free)
                cfg->free(cookie, pages, size);
        else
                iommu_free_pages(pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
                                struct io_pgtable_cfg *cfg)
{
        dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
                                   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
        for (int i = 0; i < num_entries; i++)
                ptep[i] = 0;

        if (!cfg->coherent_walk && num_entries)
                __arm_lpae_sync_pte(ptep, num_entries, cfg);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                               struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, size_t pgcount,
                               int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                                phys_addr_t paddr, arm_lpae_iopte prot,
                                int lvl, int num_entries, arm_lpae_iopte *ptep)
{
        arm_lpae_iopte pte = prot;
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
        int i;

        if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
                pte |= ARM_LPAE_PTE_TYPE_PAGE;
        else
                pte |= ARM_LPAE_PTE_TYPE_BLOCK;

        for (i = 0; i < num_entries; i++)
                ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

        if (!cfg->coherent_walk)
                __arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                             unsigned long iova, phys_addr_t paddr,
                             arm_lpae_iopte prot, int lvl, int num_entries,
                             arm_lpae_iopte *ptep)
{
        int i;

        for (i = 0; i < num_entries; i++)
                if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
                        /* We require an unmap first */
                        WARN_ON(!selftest_running);
                        return -EEXIST;
                } else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
                        /*
                         * We need to unmap and free the old table before
                         * overwriting it with a block entry.
                         */
                        arm_lpae_iopte *tblp;
                        size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

                        tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
                        if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
                                             lvl, tblp) != sz) {
                                WARN_ON(1);
                                return -EINVAL;
                        }
                }

        __arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
        return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
                                             arm_lpae_iopte *ptep,
                                             arm_lpae_iopte curr,
                                             struct arm_lpae_io_pgtable *data)
{
        arm_lpae_iopte old, new;
        struct io_pgtable_cfg *cfg = &data->iop.cfg;

        new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
        if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                new |= ARM_LPAE_PTE_NSTABLE;

        /*
         * Ensure the table itself is visible before its PTE can be.
         * Whilst we could get away with cmpxchg64_release below, this
         * doesn't have any ordering semantics when !CONFIG_SMP.
         */
        dma_wmb();

        old = cmpxchg64_relaxed(ptep, curr, new);

        if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
                return old;

        /* Even if it's not ours, there's no point waiting; just kick it */
        __arm_lpae_sync_pte(ptep, 1, cfg);
        if (old == curr)
                WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

        return old;
}
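
/*
 * Summary of the non-coherent publication protocol above (descriptive): the
 * new table is made visible with dma_wmb() before its pointer is published
 * via cmpxchg64_relaxed(). If the walk is not coherent and the existing
 * entry was not already marked ARM_LPAE_PTE_SW_SYNC, the caller syncs the
 * PTE unconditionally, and the cmpxchg winner rewrites its entry with
 * SW_SYNC set so that later walkers can skip the redundant sync.
 */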
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                          phys_addr_t paddr, size_t size, size_t pgcount,
                          arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
                          gfp_t gfp, size_t *mapped)
{
        arm_lpae_iopte *cptep, pte;
        size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
        size_t tblsz = ARM_LPAE_GRANULE(data);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        int ret = 0, num_entries, max_entries, map_idx_start;

        /* Find our entry at the current level */
        map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
        ptep += map_idx_start;

        /* If we can install a leaf entry at this level, then do so */
        if (size == block_size) {
                max_entries = arm_lpae_max_entries(map_idx_start, data);
                num_entries = min_t(int, pgcount, max_entries);
                ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
                if (!ret)
                        *mapped += num_entries * size;

                return ret;
        }

        /* We can't allocate tables at the final level */
        if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
                return -EINVAL;

        /* Grab a pointer to the next level */
        pte = READ_ONCE(*ptep);
        if (!pte) {
                cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
                if (!cptep)
                        return -ENOMEM;

                pte = arm_lpae_install_table(cptep, ptep, 0, data);
                if (pte)
                        __arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
        } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
                __arm_lpae_sync_pte(ptep, 1, cfg);
        }

        if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
                cptep = iopte_deref(pte, data);
        } else if (pte) {
                /* We require an unmap first */
                WARN_ON(!selftest_running);
                return -EEXIST;
        }

        /* Rinse, repeat */
        return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
                              cptep, gfp, mapped);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                                           int prot)
{
        arm_lpae_iopte pte;

        if (data->iop.fmt == ARM_64_LPAE_S1 ||
            data->iop.fmt == ARM_32_LPAE_S1) {
                pte = ARM_LPAE_PTE_nG;
                if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
                        pte |= ARM_LPAE_PTE_AP_RDONLY;
                else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
                        pte |= ARM_LPAE_PTE_DBM;
                if (!(prot & IOMMU_PRIV))
                        pte |= ARM_LPAE_PTE_AP_UNPRIV;
        } else {
                pte = ARM_LPAE_PTE_HAP_FAULT;
                if (prot & IOMMU_READ)
                        pte |= ARM_LPAE_PTE_HAP_READ;
                if (prot & IOMMU_WRITE)
                        pte |= ARM_LPAE_PTE_HAP_WRITE;
        }

        /*
         * Note that this logic is structured to accommodate Mali LPAE
         * having stage-1-like attributes but stage-2-like permissions.
         */
        if (data->iop.fmt == ARM_64_LPAE_S2 ||
            data->iop.fmt == ARM_32_LPAE_S2) {
                if (prot & IOMMU_MMIO)
                        pte |= ARM_LPAE_PTE_MEMATTR_DEV;
                else if (prot & IOMMU_CACHE)
                        pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
                else
                        pte |= ARM_LPAE_PTE_MEMATTR_NC;
        } else {
                if (prot & IOMMU_MMIO)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
                else if (prot & IOMMU_CACHE)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }

        /*
         * Also Mali has its own notions of shareability wherein its Inner
         * domain covers the cores within the GPU, and its Outer domain is
         * "outside the GPU" (i.e. either the Inner or System domain in CPU
         * terms, depending on coherency).
         */
        if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
                pte |= ARM_LPAE_PTE_SH_IS;
        else
                pte |= ARM_LPAE_PTE_SH_OS;

        if (prot & IOMMU_NOEXEC)
                pte |= ARM_LPAE_PTE_XN;

        if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
                pte |= ARM_LPAE_PTE_NS;

        if (data->iop.fmt != ARM_MALI_LPAE)
                pte |= ARM_LPAE_PTE_AF;

        return pte;
}
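
/*
 * Quick reference for the translation above (descriptive): stage 1 encodes
 * permissions via AP bits (RDONLY/UNPRIV) plus nG and a MAIR attribute
 * index, while stage 2 uses the HAP read/write bits and direct MEMATTR
 * values. IOMMU_NOEXEC sets XN, IOMMU_CACHE selects inner-shareable
 * write-back memory, and every format except Mali gets the Access Flag
 * set up front so that AF faults never occur.
 */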
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
                              phys_addr_t paddr, size_t pgsize, size_t pgcount,
                              int iommu_prot, gfp_t gfp, size_t *mapped)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
        long iaext = (s64)iova >> cfg->ias;

        if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
                return -EINVAL;

        if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
                iaext = ~iaext;
        if (WARN_ON(iaext || paddr >> cfg->oas))
                return -ERANGE;

        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
                return -EINVAL;

        prot = arm_lpae_prot_to_pte(data, iommu_prot);
        ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
                             ptep, gfp, mapped);
        /*
         * Synchronise all PTE updates for the new mapping before there's
         * a chance for anything to kick off a table walk for the new iova.
         */
        wmb();

        return ret;
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
                                    arm_lpae_iopte *ptep)
{
        arm_lpae_iopte *start, *end;
        unsigned long table_size;

        if (lvl == data->start_level)
                table_size = ARM_LPAE_PGD_SIZE(data);
        else
                table_size = ARM_LPAE_GRANULE(data);

        start = ptep;

        /* Only leaf entries at the last level */
        if (lvl == ARM_LPAE_MAX_LEVELS - 1)
                end = ptep;
        else
                end = (void *)ptep + table_size;

        while (ptep != end) {
                arm_lpae_iopte pte = *ptep++;

                if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
                        continue;

                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
        }

        __arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

        __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
        kfree(data);
}
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
                                       struct iommu_iotlb_gather *gather,
                                       unsigned long iova, size_t size,
                                       arm_lpae_iopte blk_pte, int lvl,
                                       arm_lpae_iopte *ptep, size_t pgcount)
{
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte pte, *tablep;
        phys_addr_t blk_paddr;
        size_t tablesz = ARM_LPAE_GRANULE(data);
        size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
        int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
        int i, unmap_idx_start = -1, num_entries = 0, max_entries;

        if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                return 0;

        tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
        if (!tablep)
                return 0; /* Bytes unmapped */

        if (size == split_sz) {
                unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
                max_entries = arm_lpae_max_entries(unmap_idx_start, data);
                num_entries = min_t(int, pgcount, max_entries);
        }

        blk_paddr = iopte_to_paddr(blk_pte, data);
        pte = iopte_prot(blk_pte);

        for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
                /* Unmap! */
                if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
                        continue;

                __arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
        }

        pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
        if (pte != blk_pte) {
                __arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
                /*
                 * We may race against someone unmapping another part of this
                 * block, but anything else is invalid. We can't misinterpret
                 * a page entry here since we're never at the last level.
                 */
                if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
                        return 0;

                tablep = iopte_deref(pte, data);
        } else if (unmap_idx_start >= 0) {
                for (i = 0; i < num_entries; i++)
                        io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

                return num_entries * size;
        }

        return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
                               struct iommu_iotlb_gather *gather,
                               unsigned long iova, size_t size, size_t pgcount,
                               int lvl, arm_lpae_iopte *ptep)
{
        arm_lpae_iopte pte;
        struct io_pgtable *iop = &data->iop;
        int i = 0, num_entries, max_entries, unmap_idx_start;

        /* Something went horribly wrong and we ran out of page table */
        if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                return 0;

        unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
        ptep += unmap_idx_start;
        pte = READ_ONCE(*ptep);
        if (WARN_ON(!pte))
                return 0;

        /* If the size matches this level, we're in the right place */
        if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
                max_entries = arm_lpae_max_entries(unmap_idx_start, data);
                num_entries = min_t(int, pgcount, max_entries);

                /* Find and handle non-leaf entries */
                for (i = 0; i < num_entries; i++) {
                        pte = READ_ONCE(ptep[i]);
                        if (WARN_ON(!pte))
                                break;

                        if (!iopte_leaf(pte, lvl, iop->fmt)) {
                                __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);

                                /* Also flush any partial walks */
                                io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
                                                          ARM_LPAE_GRANULE(data));
                                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
                        }
                }

                /* Clear the remaining entries */
                __arm_lpae_clear_pte(ptep, &iop->cfg, i);

                if (gather && !iommu_iotlb_gather_queued(gather))
                        for (int j = 0; j < i; j++)
                                io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);

                return i * size;
        } else if (iopte_leaf(pte, lvl, iop->fmt)) {
                /*
                 * Insert a table at the next level to map the old region,
                 * minus the part we want to unmap
                 */
                return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
                                                lvl + 1, ptep, pgcount);
        }

        /* Keep on walkin' */
        ptep = iopte_deref(pte, data);
        return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}
static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
                                   size_t pgsize, size_t pgcount,
                                   struct iommu_iotlb_gather *gather)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
        long iaext = (s64)iova >> cfg->ias;

        if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
                return 0;

        if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
                iaext = ~iaext;
        if (WARN_ON(iaext))
                return 0;

        return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
                                data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
                                         unsigned long iova)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        arm_lpae_iopte pte, *ptep = data->pgd;
        int lvl = data->start_level;

        do {
                /* Valid IOPTE pointer? */
                if (!ptep)
                        return 0;

                /* Grab the IOPTE we're interested in */
                ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
                pte = READ_ONCE(*ptep);

                /* Valid entry? */
                if (!pte)
                        return 0;

                /* Leaf entry? */
                if (iopte_leaf(pte, lvl, data->iop.fmt))
                        goto found_translation;

                /* Take it to the next level */
                ptep = iopte_deref(pte, data);
        } while (++lvl < ARM_LPAE_MAX_LEVELS);

        /* Ran out of page tables to walk */
        return 0;

found_translation:
        iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
        return iopte_to_paddr(pte, data) | iova;
}
struct io_pgtable_walk_data {
        struct iommu_dirty_bitmap	*dirty;
        unsigned long			flags;
        u64				addr;
        const u64			end;
};

static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
                                       struct io_pgtable_walk_data *walk_data,
                                       arm_lpae_iopte *ptep,
                                       int lvl);

static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
                                  struct io_pgtable_walk_data *walk_data,
                                  arm_lpae_iopte *ptep, int lvl)
{
        struct io_pgtable *iop = &data->iop;
        arm_lpae_iopte pte = READ_ONCE(*ptep);

        if (iopte_leaf(pte, lvl, iop->fmt)) {
                size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);

                if (iopte_writeable_dirty(pte)) {
                        iommu_dirty_bitmap_record(walk_data->dirty,
                                                  walk_data->addr, size);
                        if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
                                iopte_set_writeable_clean(ptep);
                }
                walk_data->addr += size;
                return 0;
        }

        if (WARN_ON(!iopte_table(pte, lvl)))
                return -EINVAL;

        ptep = iopte_deref(pte, data);
        return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
}

static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
                                       struct io_pgtable_walk_data *walk_data,
                                       arm_lpae_iopte *ptep,
                                       int lvl)
{
        u32 idx;
        int max_entries, ret;

        if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
                return -EINVAL;

        if (lvl == data->start_level)
                max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
        else
                max_entries = ARM_LPAE_PTES_PER_TABLE(data);

        for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
             (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
                ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
                if (ret)
                        return ret;
        }

        return 0;
}

static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
                                         unsigned long iova, size_t size,
                                         unsigned long flags,
                                         struct iommu_dirty_bitmap *dirty)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        struct io_pgtable_walk_data walk_data = {
                .dirty = dirty,
                .flags = flags,
                .addr = iova,
                .end = iova + size,
        };
        arm_lpae_iopte *ptep = data->pgd;
        int lvl = data->start_level;

        if (WARN_ON(!size))
                return -EINVAL;
        if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
                return -EINVAL;
        if (data->iop.fmt != ARM_64_LPAE_S1)
                return -EINVAL;

        return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
}
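
/*
 * Descriptive note: dirty tracking relies on DBM-capable stage-1 tables
 * (the IO_PGTABLE_QUIRK_ARM_HD path above). iopte_writeable_dirty()
 * reports a PTE as dirty when DBM is set and the RDONLY bit has been
 * cleared by the hardware dirty-state update; "clearing" dirty simply
 * sets RDONLY again, write-protecting the page until the next DBM-driven
 * update marks it dirty once more.
 */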
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
        unsigned long granule, page_sizes;
        unsigned int max_addr_bits = 48;

        /*
         * We need to restrict the supported page sizes to match the
         * translation regime for a particular granule. Aim to match
         * the CPU page size if possible, otherwise prefer smaller sizes.
         * While we're at it, restrict the block sizes to match the
         * chosen granule.
         */
        if (cfg->pgsize_bitmap & PAGE_SIZE)
                granule = PAGE_SIZE;
        else if (cfg->pgsize_bitmap & ~PAGE_MASK)
                granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
        else if (cfg->pgsize_bitmap & PAGE_MASK)
                granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
        else
                granule = 0;

        switch (granule) {
        case SZ_4K:
                page_sizes = (SZ_4K | SZ_2M | SZ_1G);
                break;
        case SZ_16K:
                page_sizes = (SZ_16K | SZ_32M);
                break;
        case SZ_64K:
                max_addr_bits = 52;
                page_sizes = (SZ_64K | SZ_512M);
                if (cfg->oas > 48)
                        page_sizes |= 1ULL << 42; /* 4TB */
                break;
        default:
                page_sizes = 0;
        }

        cfg->pgsize_bitmap &= page_sizes;
        cfg->ias = min(cfg->ias, max_addr_bits);
        cfg->oas = min(cfg->oas, max_addr_bits);
}
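
/*
 * Illustrative example: a caller advertising pgsize_bitmap = 4K | 64K | 2M
 * on a host with PAGE_SIZE = 4K picks the 4K granule, so the bitmap is
 * ANDed with 4K | 2M | 1G, leaving 4K | 2M and dropping the 64K bit.
 * Only the 64K granule raises max_addr_bits from 48 to 52.
 */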
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
        struct arm_lpae_io_pgtable *data;
        int levels, va_bits, pg_shift;

        arm_lpae_restrict_pgsizes(cfg);

        if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
                return NULL;

        if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
                return NULL;

        if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
                return NULL;

        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;

        pg_shift = __ffs(cfg->pgsize_bitmap);
        data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

        va_bits = cfg->ias - pg_shift;
        levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
        data->start_level = ARM_LPAE_MAX_LEVELS - levels;

        /* Calculate the actual size of our pgd (without concatenation) */
        data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

        data->iop.ops = (struct io_pgtable_ops) {
                .map_pages	= arm_lpae_map_pages,
                .unmap_pages	= arm_lpae_unmap_pages,
                .iova_to_phys	= arm_lpae_iova_to_phys,
                .read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
        };

        return data;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
        u64 reg;
        struct arm_lpae_io_pgtable *data;
        typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
        bool tg1;

        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
                            IO_PGTABLE_QUIRK_ARM_TTBR1 |
                            IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
                            IO_PGTABLE_QUIRK_ARM_HD))
                return NULL;

        data = arm_lpae_alloc_pgtable(cfg);
        if (!data)
                return NULL;

        /* TCR */
        if (cfg->coherent_walk) {
                tcr->sh = ARM_LPAE_TCR_SH_IS;
                tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
                tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
                if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
                        goto out_free_data;
        } else {
                tcr->sh = ARM_LPAE_TCR_SH_OS;
                tcr->irgn = ARM_LPAE_TCR_RGN_NC;
                if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
                        tcr->orgn = ARM_LPAE_TCR_RGN_NC;
                else
                        tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
        }

        tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
                tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
                break;
        case SZ_16K:
                tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
                tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
                break;
        }

        switch (cfg->oas) {
        case 32:
                tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
                tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
                tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
                tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
                tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
                tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
                tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }

        tcr->tsz = 64ULL - cfg->ias;

        /* MAIRs */
        reg = (ARM_LPAE_MAIR_ATTR_NC
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
              (ARM_LPAE_MAIR_ATTR_WBRWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
              (ARM_LPAE_MAIR_ATTR_DEVICE
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
              (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

        cfg->arm_lpae_s1_cfg.mair = reg;

        /* Looking good; allocate a pgd */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
                                           GFP_KERNEL, cfg, cookie);
        if (!data->pgd)
                goto out_free_data;

        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();

        /* TTBR */
        cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
        return &data->iop;

out_free_data:
        kfree(data);
        return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
        u64 sl;
        struct arm_lpae_io_pgtable *data;
        typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

        /* The NS quirk doesn't apply at stage 2 */
        if (cfg->quirks)
                return NULL;

        data = arm_lpae_alloc_pgtable(cfg);
        if (!data)
                return NULL;

        /*
         * Concatenate PGDs at level 1 if possible in order to reduce
         * the depth of the stage-2 walk.
         */
        if (data->start_level == 0) {
                unsigned long pgd_pages;

                pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
                if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
                        data->pgd_bits += data->bits_per_level;
                        data->start_level++;
                }
        }
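
        /*
         * Illustrative example: with a 4K granule and IAS = 40, va_bits = 28
         * gives a four-level walk whose level-0 table would hold only two
         * entries. Concatenation instead widens the PGD to two contiguous
         * level-1 pages (pgd_bits 10) and starts the walk at level 1, which
         * is reflected in the SL0 field computed below.
         */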
        /* VTCR */
        if (cfg->coherent_walk) {
                vtcr->sh = ARM_LPAE_TCR_SH_IS;
                vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
                vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
        } else {
                vtcr->sh = ARM_LPAE_TCR_SH_OS;
                vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
                vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
        }

        sl = data->start_level;

        switch (ARM_LPAE_GRANULE(data)) {
        case SZ_4K:
                vtcr->tg = ARM_LPAE_TCR_TG0_4K;
                sl++; /* SL0 format is different for 4K granule size */
                break;
        case SZ_16K:
                vtcr->tg = ARM_LPAE_TCR_TG0_16K;
                break;
        case SZ_64K:
                vtcr->tg = ARM_LPAE_TCR_TG0_64K;
                break;
        }

        switch (cfg->oas) {
        case 32:
                vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
                break;
        case 36:
                vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
                break;
        case 40:
                vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
                break;
        case 42:
                vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
                break;
        case 44:
                vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
                break;
        case 48:
                vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
                break;
        case 52:
                vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
                break;
        default:
                goto out_free_data;
        }

        vtcr->tsz = 64ULL - cfg->ias;
        vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

        /* Allocate pgd pages */
        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
                                           GFP_KERNEL, cfg, cookie);
        if (!data->pgd)
                goto out_free_data;

        /* Ensure the empty pgd is visible before any actual TTBR write */
        wmb();

        /* VTTBR */
        cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
        return &data->iop;

out_free_data:
        kfree(data);
        return NULL;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
        if (cfg->ias > 32 || cfg->oas > 40)
                return NULL;

        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
        return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
        if (cfg->ias > 40 || cfg->oas > 40)
                return NULL;

        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
        return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
        struct arm_lpae_io_pgtable *data;

        /* No quirks for Mali (hopefully) */
        if (cfg->quirks)
                return NULL;

        if (cfg->ias > 48 || cfg->oas > 40)
                return NULL;

        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

        data = arm_lpae_alloc_pgtable(cfg);
        if (!data)
                return NULL;

        /* Mali seems to need a full 4-level table regardless of IAS */
        if (data->start_level > 0) {
                data->start_level = 0;
                data->pgd_bits = 0;
        }
        /*
         * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
         * best we can do is mimic the out-of-tree driver and hope that the
         * "implementation-defined caching policy" is good enough. Similarly,
         * we'll use it for the sake of a valid attribute for our 'device'
         * index, although callers should never request that in practice.
         */
        cfg->arm_mali_lpae_cfg.memattr =
                (ARM_MALI_LPAE_MEMATTR_IMP_DEF
                 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
                (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
                 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
                (ARM_MALI_LPAE_MEMATTR_IMP_DEF
                 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

        data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
                                           cfg, cookie);
        if (!data->pgd)
                goto out_free_data;

        /* Ensure the empty pgd is visible before TRANSTAB can be written */
        wmb();

        cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
                                          ARM_MALI_LPAE_TTBR_READ_INNER |
                                          ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
        if (cfg->coherent_walk)
                cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

        return &data->iop;

out_free_data:
        kfree(data);
        return NULL;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
        .caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc	= arm_64_lpae_alloc_pgtable_s1,
        .free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
        .caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc	= arm_64_lpae_alloc_pgtable_s2,
        .free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
        .caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc	= arm_32_lpae_alloc_pgtable_s1,
        .free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
        .caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc	= arm_32_lpae_alloc_pgtable_s2,
        .free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
        .caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
        .alloc	= arm_mali_lpae_alloc_pgtable,
        .free	= arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
        WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
                                   size_t granule, void *cookie)
{
        WARN_ON(cookie != cfg_cookie);
        WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
                                      unsigned long iova, size_t granule,
                                      void *cookie)
{
        dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
        .tlb_flush_all	= dummy_tlb_flush_all,
        .tlb_flush_walk	= dummy_tlb_flush,
        .tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;

        pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
               cfg->pgsize_bitmap, cfg->ias);
        pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
               ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
               ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({ \
                WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
                arm_lpae_dump_ops(ops); \
                selftest_running = false; \
                -EFAULT; \
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
        static const enum io_pgtable_fmt fmts[] __initconst = {
                ARM_64_LPAE_S1,
                ARM_64_LPAE_S2,
        };

        int i, j;
        unsigned long iova;
        size_t size, mapped;
        struct io_pgtable_ops *ops;

        selftest_running = true;

        for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
                cfg_cookie = cfg;
                ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
                if (!ops) {
                        pr_err("selftest: failed to allocate io pgtable ops\n");
                        return -ENOMEM;
                }

                /*
                 * Initial sanity checks.
                 * Empty page tables shouldn't provide any translations.
                 */
                if (ops->iova_to_phys(ops, 42))
                        return __FAIL(ops, i);

                if (ops->iova_to_phys(ops, SZ_1G + 42))
                        return __FAIL(ops, i);

                if (ops->iova_to_phys(ops, SZ_2G + 42))
                        return __FAIL(ops, i);

                /*
                 * Distinct mappings of different granule sizes.
                 */
                iova = 0;
                for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
                        size = 1UL << j;

                        if (ops->map_pages(ops, iova, iova, size, 1,
                                           IOMMU_READ | IOMMU_WRITE |
                                           IOMMU_NOEXEC | IOMMU_CACHE,
                                           GFP_KERNEL, &mapped))
                                return __FAIL(ops, i);

                        /* Overlapping mappings */
                        if (!ops->map_pages(ops, iova, iova + size, size, 1,
                                            IOMMU_READ | IOMMU_NOEXEC,
                                            GFP_KERNEL, &mapped))
                                return __FAIL(ops, i);

                        if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
                                return __FAIL(ops, i);

                        iova += SZ_1G;
                }

                /* Partial unmap */
                size = 1UL << __ffs(cfg->pgsize_bitmap);
                if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
                        return __FAIL(ops, i);

                /* Remap of partial unmap */
                if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
                                   IOMMU_READ, GFP_KERNEL, &mapped))
                        return __FAIL(ops, i);

                if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
                        return __FAIL(ops, i);

                /* Full unmap */
                iova = 0;
                for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
                        size = 1UL << j;

                        if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
                                return __FAIL(ops, i);

                        if (ops->iova_to_phys(ops, iova + 42))
                                return __FAIL(ops, i);

                        /* Remap full block */
                        if (ops->map_pages(ops, iova, iova, size, 1,
                                           IOMMU_WRITE, GFP_KERNEL, &mapped))
                                return __FAIL(ops, i);

                        if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
                                return __FAIL(ops, i);

                        iova += SZ_1G;
                }

                free_io_pgtable_ops(ops);
        }

        selftest_running = false;
        return 0;
}

static int __init arm_lpae_do_selftests(void)
{
        static const unsigned long pgsize[] __initconst = {
                SZ_4K | SZ_2M | SZ_1G,
                SZ_16K | SZ_32M,
                SZ_64K | SZ_512M,
        };

        static const unsigned int ias[] __initconst = {
                32, 36, 40, 42, 44, 48,
        };

        int i, j, pass = 0, fail = 0;
        struct device dev;
        struct io_pgtable_cfg cfg = {
                .tlb = &dummy_tlb_ops,
                .oas = 48,
                .coherent_walk = true,
                .iommu_dev = &dev,
        };

        /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
        set_dev_node(&dev, NUMA_NO_NODE);

        for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
                for (j = 0; j < ARRAY_SIZE(ias); ++j) {
                        cfg.pgsize_bitmap = pgsize[i];
                        cfg.ias = ias[j];
                        pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
                                pgsize[i], ias[j]);
                        if (arm_lpae_run_tests(&cfg))
                                fail++;
                        else
                                pass++;
                }
        }

        pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
        return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif