// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2018
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      David Hildenbrand <david@redhat.com>
 *	      Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);

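/*
 * Minimal usage sketch for the create/remove pair (illustration only, so it
 * is compiled out): "guest_mm" and the 4 TB limit are hypothetical values a
 * KVM-like caller might use.
 */
#if 0
static int example_gmap_lifecycle(struct mm_struct *guest_mm)
{
	struct gmap *g;

	g = gmap_create(guest_mm, (1UL << 42) - 1);	/* 4 TB guest limit */
	if (!g)
		return -ENOMEM;
	/* ... gmap_map_segment(), gmap_fault(), ... */
	gmap_remove(g);		/* unlink and drop the initial reference */
	return 0;
}
#endif
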
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);

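/*
 * Reference-counting sketch (compiled out): every gmap_get() must be paired
 * with a gmap_put(); the final put frees the structure. "g" is assumed to
 * be a live gmap obtained elsewhere.
 */
#if 0
	struct gmap *ref = gmap_get(g);	/* pin across a sleeping operation */
	/* ... use ref ... */
	gmap_put(ref);			/* may free g if this was the last ref */
#endif
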
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap, or NULL if none
 * is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

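/*
 * Worked example for the arithmetic above (hypothetical numbers, compiled
 * out): with 2048 eight-byte entries per 16 KB segment table, an @entry
 * sitting 17 slots into its table yields offset 17 * PMD_SIZE, added to the
 * guest address recorded in page->index when the table was linked.
 */
#if 0
static unsigned long example_segment_gaddr(void)
{
	/* 17th entry of a segment table whose page->index is 0x80000000 */
	return 0x80000000UL + 17 * PMD_SIZE;	/* == 0x81100000 */
}
#endif
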
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

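/*
 * Usage sketch for gmap_map_segment() (compiled out): back 16 MB of guest
 * real memory at 256 MB with host memory at "host_base". Both addresses and
 * the length must be 1 MB segment aligned, as the checks above require; the
 * values are hypothetical.
 */
#if 0
	if (gmap_map_segment(g, host_base, SZ_256M, SZ_16M))
		/* either bad alignment/range (-EINVAL) or -ENOMEM */
		goto fail;
#endif
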
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

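/*
 * Translation sketch (compiled out): the return value doubles as an error
 * code, so it has to be checked with IS_ERR_VALUE() before use. "g" and
 * "gaddr" are hypothetical.
 */
#if 0
	unsigned long vmaddr = gmap_translate(g, gaddr);

	if (IS_ERR_VALUE(vmaddr))
		return (int) vmaddr;	/* -EFAULT: no segment mapped here */
	/* vmaddr is now the host address backing gaddr */
#endif
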
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					| _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					_SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault() dropped the mmap_sem during the
	 * fault-in, redo __gmap_translate() to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

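/*
 * Fault-handling sketch (compiled out): how a host handler for a faulting
 * guest access might resolve the guest address. "g" and "gaddr" are
 * hypothetical; FAULT_FLAG_WRITE requests a writable mapping.
 */
#if 0
	int rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);

	if (rc == -EFAULT)
		/* gaddr is outside every mapped segment */
		inject_addressing_exception();	/* hypothetical helper */
	else if (rc)
		return rc;	/* e.g. -ENOMEM */
#endif
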
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep)) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

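/*
 * Notifier sketch (compiled out): a caller provides a struct gmap_notifier
 * whose notifier_call fires when a protected range is invalidated. The
 * callback name and its body here are hypothetical.
 */
#if 0
static void example_notifier_call(struct gmap *gmap, unsigned long start,
				  unsigned long end)
{
	/* e.g. kick vcpus whose prefix pages lie in [start, end] */
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_notifier_call,
};

	gmap_register_pte_notifier(&example_notifier);
	/* ... */
	gmap_unregister_pte_notifier(&example_notifier);
#endif
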
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level:
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table;

	if (asce_type + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	table = gmap->table;
	switch (asce_type) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* Fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}

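/*
 * Level-walk sketch (compiled out): fetch the segment-table entry that maps
 * a hypothetical guest address "gaddr", mirroring how the helpers below use
 * gmap_table_walk().
 */
#if 0
	unsigned long *ste = gmap_table_walk(g, gaddr, 1);

	if (!ste || *ste & _SEGMENT_ENTRY_INVALID)
		return NULL;	/* not mapped down to segment level */
#endif
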
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp || pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/*
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_sem in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/*
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *			  call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);

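/*
 * Protection sketch (compiled out): write-protect one guest page and arm the
 * mprotect notifier, the way a hypervisor might watch a vcpu prefix page; a
 * registered notifier (see the sketch further above) then fires on the next
 * write. "g" and "prefix" are hypothetical.
 */
#if 0
	rc = gmap_mprotect_notify(g, prefix, PAGE_SIZE, PROT_READ);
	/* rc == -EFAULT: no gmap mapping; rc == -ENOMEM: allocation failed */
#endif
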
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *		     absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);

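/*
 * Read sketch (compiled out): fetch one 8-byte guest DAT-table entry, the
 * kind of lookup a shadow-table walker performs. "g" and "gaddr" are
 * hypothetical.
 */
#if 0
	unsigned long entry;
	int rc = gmap_read_table(g, gaddr & ~7UL, &entry);

	if (rc)
		return rc;	/* -EFAULT, -ENOMEM or -EINVAL */
	/* entry now holds the 8-byte guest table entry at gaddr */
#endif
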
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}


/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r2o, *r2e, *r3t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
        if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
        r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
        gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
        r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
        *r2e = _REGION2_ENTRY_EMPTY;
        __gmap_unshadow_r3t(sg, raddr, r3t);
        /* Free region 3 table */
        page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r2t)
{
        unsigned long *r3t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
                if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
                r2t[i] = _REGION2_ENTRY_EMPTY;
                __gmap_unshadow_r3t(sg, raddr, r3t);
                /* Free region 3 table */
                page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, CRST_ALLOC_ORDER);
        }
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r1o, *r1e, *r2t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
        if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
        r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
        gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
        r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
        *r1e = _REGION1_ENTRY_EMPTY;
        __gmap_unshadow_r2t(sg, raddr, r2t);
        /* Free region 2 table */
        page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r1t)
{
        unsigned long asce, *r2t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
        for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
                if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
                __gmap_unshadow_r2t(sg, raddr, r2t);
                /* Clear entry and flush translation r1t -> r2t */
                gmap_idte_one(asce, raddr);
                r1t[i] = _REGION1_ENTRY_EMPTY;
                /* Free region 2 table */
                page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, CRST_ALLOC_ORDER);
        }
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        if (sg->removed)
                return;
        sg->removed = 1;
        gmap_call_notifier(sg, 0, -1UL);
        gmap_flush_tlb(sg);
        table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
        switch (sg->asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                __gmap_unshadow_r1t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION2:
                __gmap_unshadow_r2t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION3:
                __gmap_unshadow_r3t(sg, 0, table);
                break;
        case _ASCE_TYPE_SEGMENT:
                __gmap_unshadow_sgt(sg, 0, table);
                break;
        }
}
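
/*
 * Editorial note (not part of the original source): the switch above
 * dispatches on the ASCE designation type, which encodes the depth of
 * the shadow hierarchy.  For a 4-level (region-1) ASCE the teardown
 * cascades as
 *
 *      __gmap_unshadow_r1t() -> __gmap_unshadow_r2t()
 *              -> __gmap_unshadow_r3t() -> __gmap_unshadow_sgt()
 *              -> __gmap_unshadow_pgt()
 *
 * freeing one CRST allocation (CRST_ALLOC_ORDER pages) per region or
 * segment table and one pte+pgste page per page table on the way down.
 */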

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
                                     int edat_level)
{
        struct gmap *sg;

        list_for_each_entry(sg, &parent->children, list) {
                if (sg->orig_asce != asce || sg->edat_level != edat_level ||
                    sg->removed)
                        continue;
                if (!sg->initialized)
                        return ERR_PTR(-EAGAIN);
                atomic_inc(&sg->ref_count);
                return sg;
        }
        return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
        if (sg->removed)
                return 0;
        return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
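
/*
 * Usage sketch (illustrative only; the caller-side names such as
 * "vsie_page" and "vcpu" are assumptions, not part of this file):
 *
 *      if (!gmap_shadow_valid(vsie_page->gmap, asce, edat_level)) {
 *              gmap_put(vsie_page->gmap);
 *              vsie_page->gmap = gmap_shadow(vcpu->arch.gmap, asce,
 *                                            edat_level);
 *      }
 *
 * i.e. a cached shadow gmap may only be reused while it still matches
 * the guest's ASCE/EDAT configuration and has not been removed.
 */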

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                         int edat_level)
{
        struct gmap *sg, *new;
        unsigned long limit;
        int rc;

        BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
        BUG_ON(gmap_is_shadow(parent));
        spin_lock(&parent->shadow_lock);
        sg = gmap_find_shadow(parent, asce, edat_level);
        spin_unlock(&parent->shadow_lock);
        if (sg)
                return sg;
        /* Create a new shadow gmap */
        limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
        if (asce & _ASCE_REAL_SPACE)
                limit = -1UL;
        new = gmap_alloc(limit);
        if (!new)
                return ERR_PTR(-ENOMEM);
        new->mm = parent->mm;
        new->parent = gmap_get(parent);
        new->orig_asce = asce;
        new->edat_level = edat_level;
        new->initialized = false;
        spin_lock(&parent->shadow_lock);
        /* Recheck if another CPU created the same shadow */
        sg = gmap_find_shadow(parent, asce, edat_level);
        if (sg) {
                spin_unlock(&parent->shadow_lock);
                gmap_free(new);
                return sg;
        }
        if (asce & _ASCE_REAL_SPACE) {
                /* only allow one real-space gmap shadow */
                list_for_each_entry(sg, &parent->children, list) {
                        if (sg->orig_asce & _ASCE_REAL_SPACE) {
                                spin_lock(&sg->guest_table_lock);
                                gmap_unshadow(sg);
                                spin_unlock(&sg->guest_table_lock);
                                list_del(&sg->list);
                                gmap_put(sg);
                                break;
                        }
                }
        }
        atomic_set(&new->ref_count, 2);
        list_add(&new->list, &parent->children);
        if (asce & _ASCE_REAL_SPACE) {
                /* nothing to protect, return right away */
                new->initialized = true;
                spin_unlock(&parent->shadow_lock);
                return new;
        }
        spin_unlock(&parent->shadow_lock);
        /* protect after insertion, so it will get properly invalidated */
        down_read(&parent->mm->mmap_sem);
        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
                                ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
                                PROT_READ, GMAP_NOTIFY_SHADOW);
        up_read(&parent->mm->mmap_sem);
        spin_lock(&parent->shadow_lock);
        new->initialized = true;
        if (rc) {
                list_del(&new->list);
                gmap_free(new);
                new = ERR_PTR(rc);
        }
        spin_unlock(&parent->shadow_lock);
        return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
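
/*
 * Worked example for the limit computation above (editorial note):
 * _ASCE_TYPE_MASK >> 2 yields the designation type (0 = segment table,
 * 1 = region 3, 2 = region 2, 3 = region 1), so
 *
 *      type 0: limit = -1UL >> 33 = 2^31 - 1   (2 GB address space)
 *      type 1: limit = -1UL >> 22 = 2^42 - 1   (4 TB)
 *      type 2: limit = -1UL >> 11 = 2^53 - 1   (8 PB)
 *      type 3: limit = -1UL >>  0 = 2^64 - 1   (full 64-bit space)
 *
 * A real-space ASCE bypasses this and always gets the full range.
 */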

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r2t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region second table */
        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        page->index = r2t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r2t = (unsigned long *) page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r2t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r2t read-only in parent gmap page table */
        raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
        origin = r2t & _REGION_ENTRY_ORIGIN;
        offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
        len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 4);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_r2t)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r2t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
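
/*
 * Editorial note on the offset/len computation above: a region table
 * spans four pages, and the TF/TL bits of the source entry describe
 * which part is valid.  Assuming the usual s390 definitions
 * (_REGION_ENTRY_OFFSET = 0xc0, _REGION_ENTRY_LENGTH = 0x03), offset
 * bits 0x40 (>> 6 == 1) combined with length bits 0x2 give
 *
 *      offset = 1 * PAGE_SIZE;
 *      len    = (2 + 1) * PAGE_SIZE - offset;  (pages 1..2)
 *
 * so only the guest-visible slice of the table is made read-only.
 */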

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r3t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region third table */
        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        page->index = r3t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r3t = (unsigned long *) page_to_phys(page);
        /* Install shadow region third table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r3t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r3t read-only in parent gmap page table */
        raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
        origin = r3t & _REGION_ENTRY_ORIGIN;
        offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
        len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 3);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_r3t)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r3t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_sgt, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
        /* Allocate a shadow segment table */
        page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        page->index = sgt & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_sgt = (unsigned long *) page_to_phys(page);
        /* Install shadow segment table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= sgt & _REGION_ENTRY_PROTECT;
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make sgt read-only in parent gmap page table */
        raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
        origin = sgt & _REGION_ENTRY_ORIGIN;
        offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
        len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 2);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                    (unsigned long) s_sgt)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_sgt(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, CRST_ALLOC_ORDER);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
                           unsigned long *pgt, int *dat_protection,
                           int *fake)
{
        unsigned long *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
        if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
                /* Shadow page tables are full pages (pte+pgste) */
                page = pfn_to_page(*table >> PAGE_SHIFT);
                *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
                *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
                *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
                rc = 0;
        } else {
                rc = -EAGAIN;
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
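
/*
 * Usage sketch (illustrative only; the fault-handler flow is an
 * assumption, not shown in this file): a VSIE-style fault handler
 * would typically try the lookup first and only build the missing
 * level on failure:
 *
 *      rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection,
 *                                  &fake);
 *      if (rc)
 *              rc = gmap_shadow_pgt(sg, saddr, pgt, fake);
 *
 * -EAGAIN from the lookup simply means the shadow segment entry is not
 * (or no longer) established.
 */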

/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
                    int fake)
{
        unsigned long raddr, origin;
        unsigned long *s_pgt, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
        /* Allocate a shadow page table */
        page = page_table_alloc_pgste(sg->mm);
        if (!page)
                return -ENOMEM;
        page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_pgt = (unsigned long *) page_to_phys(page);
        /* Install shadow page table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _SEGMENT_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
                 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
        list_add(&page->lru, &sg->pt_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_SEGMENT_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make pgt read-only in parent gmap page table (not the pgste) */
        raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
        origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
        rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 1);
                if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
                    (unsigned long) s_pgt)
                        rc = -EAGAIN;   /* Race with unshadow */
                else
                        *table &= ~_SEGMENT_ENTRY_INVALID;
        } else {
                gmap_unshadow_pgt(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        page_table_free_pgste(page);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);

/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
        struct gmap *parent;
        struct gmap_rmap *rmap;
        unsigned long vmaddr, paddr;
        spinlock_t *ptl;
        pte_t *sptep, *tptep;
        int prot;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        parent = sg->parent;
        prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

        rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
        if (!rmap)
                return -ENOMEM;
        rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

        while (1) {
                paddr = pte_val(pte) & PAGE_MASK;
                vmaddr = __gmap_translate(parent, paddr);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = vmaddr;
                        break;
                }
                rc = radix_tree_preload(GFP_KERNEL);
                if (rc)
                        break;
                rc = -EAGAIN;
                sptep = gmap_pte_op_walk(parent, paddr, &ptl);
                if (sptep) {
                        spin_lock(&sg->guest_table_lock);
                        /* Get page table pointer */
                        tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
                        if (!tptep) {
                                spin_unlock(&sg->guest_table_lock);
                                gmap_pte_op_end(ptl);
                                radix_tree_preload_end();
                                break;
                        }
                        rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
                        if (rc > 0) {
                                /* Success and a new mapping */
                                gmap_insert_rmap(sg, vmaddr, rmap);
                                rmap = NULL;
                                rc = 0;
                        }
                        gmap_pte_op_end(ptl);
                        spin_unlock(&sg->guest_table_lock);
                }
                radix_tree_preload_end();
                if (!rc)
                        break;
                rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
                if (rc)
                        break;
        }
        kfree(rmap);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
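
/*
 * Editorial note: the while (1) loop above is the usual
 * "walk, fail, fix up, retry" pattern.  Each iteration either maps the
 * shadow pte under both the parent pte lock and sg->guest_table_lock,
 * or drops back to gmap_pte_op_fixup() to fault in / write-protect the
 * parent page and tries again; only a translation error, a preload
 * failure or a fixup failure breaks the loop with an error code.
 */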

/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @gaddr: affected guest address in the parent gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
                               unsigned long gaddr)
{
        struct gmap_rmap *rmap, *rnext, *head;
        unsigned long start, end, bits, raddr;

        BUG_ON(!gmap_is_shadow(sg));

        spin_lock(&sg->guest_table_lock);
        if (sg->removed) {
                spin_unlock(&sg->guest_table_lock);
                return;
        }
        /* Check for top level table */
        start = sg->orig_asce & _ASCE_ORIGIN;
        end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
        if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
            gaddr < end) {
                /* The complete shadow table has to go */
                gmap_unshadow(sg);
                spin_unlock(&sg->guest_table_lock);
                list_del(&sg->list);
                gmap_put(sg);
                return;
        }
        /* Remove the page table tree from one specific entry */
        head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
        gmap_for_each_rmap_safe(rmap, rnext, head) {
                bits = rmap->raddr & _SHADOW_RMAP_MASK;
                raddr = rmap->raddr ^ bits;
                switch (bits) {
                case _SHADOW_RMAP_REGION1:
                        gmap_unshadow_r2t(sg, raddr);
                        break;
                case _SHADOW_RMAP_REGION2:
                        gmap_unshadow_r3t(sg, raddr);
                        break;
                case _SHADOW_RMAP_REGION3:
                        gmap_unshadow_sgt(sg, raddr);
                        break;
                case _SHADOW_RMAP_SEGMENT:
                        gmap_unshadow_pgt(sg, raddr);
                        break;
                case _SHADOW_RMAP_PGTABLE:
                        gmap_unshadow_page(sg, raddr);
                        break;
                }
                kfree(rmap);
        }
        spin_unlock(&sg->guest_table_lock);
}

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
                 pte_t *pte, unsigned long bits)
{
        unsigned long offset, gaddr = 0;
        unsigned long *table;
        struct gmap *gmap, *sg, *next;

        offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
        offset = offset * (PAGE_SIZE / sizeof(pte_t));
        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                spin_lock(&gmap->guest_table_lock);
                table = radix_tree_lookup(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (table)
                        gaddr = __gmap_segment_gaddr(table) + offset;
                spin_unlock(&gmap->guest_table_lock);
                if (!table)
                        continue;
                if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
                        spin_lock(&gmap->shadow_lock);
                        list_for_each_entry_safe(sg, next,
                                                 &gmap->children, list)
                                gmap_shadow_notify(sg, vmaddr, gaddr);
                        spin_unlock(&gmap->shadow_lock);
                }
                if (bits & PGSTE_IN_BIT)
                        gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
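
/*
 * Worked example for the offset arithmetic above (editorial note):
 * a pte pointer is aligned within its 256-entry (2 KB) page table, so
 * "& (255 * sizeof(pte_t))" extracts the byte offset of the entry.
 * Multiplying by PAGE_SIZE / sizeof(pte_t) (= 512) turns that into the
 * guest-address offset within the segment: entry index 3 sits at byte
 * offset 24, and 24 * 512 = 12288 = 3 * PAGE_SIZE, the address of the
 * third page in the segment.
 */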

static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
                             unsigned long gaddr)
{
        pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
        gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}

/**
 * gmap_pmdp_xchg - exchange a gmap pmd with another
 * @gmap: pointer to the guest address space structure
 * @pmdp: pointer to the pmd entry
 * @new: replacement entry
 * @gaddr: the affected guest address
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
                           unsigned long gaddr)
{
        gaddr &= HPAGE_MASK;
        pmdp_notify_gmap(gmap, pmdp, gaddr);
        pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
        if (MACHINE_HAS_TLB_GUEST)
                __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
                            IDTE_GLOBAL);
        else if (MACHINE_HAS_IDTE)
                __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
        else
                __pmdp_csp(pmdp);
        *pmdp = new;
}
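
/*
 * Editorial note: the three branches above pick the strongest TLB
 * flush primitive the machine offers: guest-ASCE IDTE when
 * MACHINE_HAS_TLB_GUEST, plain global IDTE when MACHINE_HAS_IDTE, and
 * otherwise CSP (compare and swap and purge) as the architectural
 * fallback.  All three invalidate the old segment entry before the
 * replacement value is stored.
 */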

static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
                            int purge)
{
        pmd_t *pmdp;
        struct gmap *gmap;
        unsigned long gaddr;

        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                spin_lock(&gmap->guest_table_lock);
                pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
                                                  vmaddr >> PMD_SHIFT);
                if (pmdp) {
                        gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
                        pmdp_notify_gmap(gmap, pmdp, gaddr);
                        WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
                                                   _SEGMENT_ENTRY_GMAP_UC));
                        if (purge)
                                __pmdp_csp(pmdp);
                        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
                }
                spin_unlock(&gmap->guest_table_lock);
        }
        rcu_read_unlock();
}

/**
 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
 *                        flushing
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
        gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);

/**
 * gmap_pmdp_csp - csp all affected guest pmd entries
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
        gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);

/**
 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
        unsigned long *entry, gaddr;
        struct gmap *gmap;
        pmd_t *pmdp;

        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                spin_lock(&gmap->guest_table_lock);
                entry = radix_tree_delete(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (entry) {
                        pmdp = (pmd_t *)entry;
                        gaddr = __gmap_segment_gaddr(entry);
                        pmdp_notify_gmap(gmap, pmdp, gaddr);
                        WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
                                           _SEGMENT_ENTRY_GMAP_UC));
                        if (MACHINE_HAS_TLB_GUEST)
                                __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
                                            gmap->asce, IDTE_LOCAL);
                        else if (MACHINE_HAS_IDTE)
                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
                        *entry = _SEGMENT_ENTRY_EMPTY;
                }
                spin_unlock(&gmap->guest_table_lock);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);

/**
 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
        unsigned long *entry, gaddr;
        struct gmap *gmap;
        pmd_t *pmdp;

        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                spin_lock(&gmap->guest_table_lock);
                entry = radix_tree_delete(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (entry) {
                        pmdp = (pmd_t *)entry;
                        gaddr = __gmap_segment_gaddr(entry);
                        pmdp_notify_gmap(gmap, pmdp, gaddr);
                        WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
                                           _SEGMENT_ENTRY_GMAP_UC));
                        if (MACHINE_HAS_TLB_GUEST)
                                __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
                                            gmap->asce, IDTE_GLOBAL);
                        else if (MACHINE_HAS_IDTE)
                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
                        else
                                __pmdp_csp(pmdp);
                        *entry = _SEGMENT_ENTRY_EMPTY;
                }
                spin_unlock(&gmap->guest_table_lock);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);

/**
 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
 * @gmap: pointer to guest address space
 * @pmdp: pointer to the pmd to be tested
 * @gaddr: virtual address in the guest address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
                                   unsigned long gaddr)
{
        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
                return false;

        /* Already-protected memory that did not change is clean */
        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
            !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
                return false;

        /* Clear UC indication and reset protection */
        pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
        gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
        return true;
}
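
/*
 * Editorial note: "dirty" is tracked here via the protect/UC pair.
 * A segment that is still write-protected and has no
 * _SEGMENT_ENTRY_GMAP_UC bit set cannot have been written since the
 * last call and is reported clean; anything else is reported dirty and
 * re-protected read-only, so that the next guest write sets UC again.
 */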

/**
 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
 * @gmap: pointer to guest address space
 * @bitmap: dirty bitmap for this pmd
 * @gaddr: virtual address in the guest address space
 * @vmaddr: virtual address in the host address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
                             unsigned long gaddr, unsigned long vmaddr)
{
        int i;
        pmd_t *pmdp;
        pte_t *ptep;
        spinlock_t *ptl;

        pmdp = gmap_pmd_op_walk(gmap, gaddr);
        if (!pmdp)
                return;

        if (pmd_large(*pmdp)) {
                if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
                        bitmap_fill(bitmap, _PAGE_ENTRIES);
        } else {
                for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
                        ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
                        if (!ptep)
                                continue;
                        if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
                                set_bit(i, bitmap);
                        spin_unlock(ptl);
                }
        }
        gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
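
/*
 * Editorial note: bitmap[4] covers the _PAGE_ENTRIES (256) pages of
 * one 1 MB segment with 4 * 64 bits.  A dirty large page sets all 256
 * bits at once via bitmap_fill(); otherwise each of the 256 ptes is
 * tested individually with ptep_test_and_clear_uc().
 */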

static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct vm_area_struct *vma;
        unsigned long addr;

        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
                for (addr = vma->vm_start;
                     addr < vma->vm_end;
                     addr += PAGE_SIZE)
                        follow_page(vma, addr, FOLL_SPLIT);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
        }
        mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
                            unsigned long end, struct mm_walk *walk)
{
        unsigned long addr;

        for (addr = start; addr != end; addr += PAGE_SIZE) {
                pte_t *ptep;
                spinlock_t *ptl;

                ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                if (is_zero_pfn(pte_pfn(*ptep)))
                        ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
                pte_unmap_unlock(ptep, ptl);
        }
        return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
        struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * Switch on pgstes for the current userspace process (for kvm).
 */
int s390_enable_sie(void)
{
        struct mm_struct *mm = current->mm;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(mm))
                return 0;
        /* Fail if the page tables are 2K */
        if (!mm_alloc_pgste(mm))
                return -EINVAL;
        down_write(&mm->mmap_sem);
        mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        zap_zero_pages(mm);
        up_write(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
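
/*
 * Usage sketch (illustrative; the KVM call site is an assumption, not
 * shown in this file):
 *
 *      if (s390_enable_sie())
 *              return -EINVAL;
 *
 * i.e. a hypervisor enables pgstes once per mm before creating any
 * gmap; the call is a no-op if the mm already has pgstes and fails
 * only when the mm was set up with 2K page tables.
 */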

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
{
        /* Clear storage key */
        ptep_zap_key(walk->mm, addr, pte);
        return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
                                      unsigned long hmask, unsigned long next,
                                      struct mm_walk *walk)
{
        pmd_t *pmd = (pmd_t *)pte;
        unsigned long start, end;
        struct page *page = pmd_page(*pmd);

        /*
         * The write check makes sure we do not set a key on shared
         * memory. This is needed as the walker does not differentiate
         * between actual guest memory and the process executable or
         * shared libraries.
         */
        if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
            !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
                return 0;

        start = pmd_val(*pmd) & HPAGE_MASK;
        end = start + HPAGE_SIZE - 1;
        __storage_key_init_range(start, end);
        set_bit(PG_arch_1, &page->flags);
        return 0;
}

int s390_enable_skey(void)
{
        struct mm_walk walk = {
                .hugetlb_entry = __s390_enable_skey_hugetlb,
                .pte_entry = __s390_enable_skey_pte,
        };
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc = 0;

        down_write(&mm->mmap_sem);
        if (mm_uses_skeys(mm))
                goto out_up;

        mm->context.uses_skeys = 1;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                MADV_UNMERGEABLE, &vma->vm_flags)) {
                        mm->context.uses_skeys = 0;
                        rc = -ENOMEM;
                        goto out_up;
                }
        }
        mm->def_flags &= ~VM_MERGEABLE;

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        ptep_zap_unused(walk->mm, addr, pte, 1);
        return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
        struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

        down_write(&mm->mmap_sem);
        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
        up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);