mmu_pv.c

// SPDX-License-Identifier: GPL-2.0

/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/sched/mm.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <trace/events/xen.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/memtype.h>
#include <asm/smp.h>
#include <asm/tlb.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>

#include "xen-ops.h"

/*
 * Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
 * to avoid warnings with "-Wmissing-prototypes".
 */
pteval_t xen_pte_val(pte_t pte);
pgdval_t xen_pgd_val(pgd_t pgd);
pmdval_t xen_pmd_val(pmd_t pmd);
pudval_t xen_pud_val(pud_t pud);
p4dval_t xen_p4d_val(p4d_t p4d);
pte_t xen_make_pte(pteval_t pte);
pgd_t xen_make_pgd(pgdval_t pgd);
pmd_t xen_make_pmd(pmdval_t pmd);
pud_t xen_make_pud(pudval_t pud);
p4d_t xen_make_p4d(p4dval_t p4d);
pte_t xen_make_pte_init(pteval_t pte);

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
static DEFINE_SPINLOCK(xen_reservation_lock);

/* Protected by xen_reservation_lock. */
#define MIN_CONTIG_ORDER 9 /* 2MB */
static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
static unsigned long *discontig_frames __refdata = discontig_frames_early;
static bool discontig_frames_dyn;
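
/*
 * Make sure the discontig_frames array can hold 2^order frame numbers
 * by swapping in a freshly allocated, larger buffer. The static early
 * buffer is used until the first dynamic allocation succeeds.
 */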
static int alloc_discontig_frames(unsigned int order)
{
	unsigned long *new_array, *old_array;
	unsigned int old_order;
	unsigned long flags;

	BUG_ON(order < MIN_CONTIG_ORDER);
	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);

	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
						      order - MIN_CONTIG_ORDER);
	if (!new_array)
		return -ENOMEM;

	spin_lock_irqsave(&xen_reservation_lock, flags);

	old_order = discontig_frames_order;

	if (order > discontig_frames_order || !discontig_frames_dyn) {
		if (!discontig_frames_dyn)
			old_array = NULL;
		else
			old_array = discontig_frames;

		discontig_frames = new_array;
		discontig_frames_order = order;
		discontig_frames_dyn = true;
	} else {
		old_array = new_array;
	}

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);

	return 0;
}

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);			/* cr3 stored as physaddr */
static DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite_novma(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

/*
 * During early boot all page table pages are pinned, but we do not have struct
 * pages, so return true until struct pages are ready.
 */
static bool xen_page_pinned(void *ptr)
{
	if (static_branch_likely(&xen_struct_pages_ready)) {
		struct page *page = virt_to_page(ptr);

		return PagePinned(page);
	}
	return true;
}
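
/*
 * Queue an mmu_update request in the current multicall batch. If the
 * previous batch entry is already an mmu_update hypercall, just append
 * the new request to its argument list (bumping the count in args[1])
 * instead of adding another hypercall.
 */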
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
					 UVMF_INVLPG))
		BUG();
}
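
/*
 * If we are batching MMU updates (lazy MMU mode), queue this PTE write
 * as part of the batch and report success. Otherwise return false so
 * the caller falls back to a single direct hypercall.
 */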
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (xen_get_lazy_mode() != XEN_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but a hypercall is much cheaper.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
	return *ptep;
}

static void xen_ptep_modify_prot_commit(struct vm_area_struct *vma,
					unsigned long addr,
					pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */

static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);
		pteval_t flags = val & PTE_FLAGS_MASK;

		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		mfn = __pfn_to_mfn(pfn);

		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte. Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
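
/*
 * The thunks below apply these conversions for each pagetable entry
 * type: the *_val accessors translate mfn back to pfn on reads, and
 * the make_* constructors translate pfn to mfn on writes. So, e.g.,
 * xen_pte_val(xen_make_pte(v)) == v whenever v's pfn has a valid mfn;
 * a pfn without an mfn comes back as a non-present pte, as described
 * in pte_pfn_to_mfn() above.
 */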
__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
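
/*
 * On 64-bit PV guests a pagetable has a kernel pgd and, optionally, a
 * separate user pgd stored in the pgd page's page->private. Return a
 * pointer to the user pgd entry matching the given kernel pgd entry,
 * or NULL if there is no user pagetable or the entry lies above the
 * user address limit.
 */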
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);

		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = p4d_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
	pgd_t pgd_val;

	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			pgd_val.pgd = p4d_val_ma(val);
			*user_ptr = pgd_val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);
	if (user_ptr)
		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);

	xen_mc_issue(XEN_LAZY_MMU);
}

#if CONFIG_PGTABLE_LEVELS >= 5
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
	return pte_mfn_to_pfn(p4d.p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);

__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
	p4d = pte_pfn_to_mfn(p4d);

	return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
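
/*
 * Per-level helpers for the pagetable walker below: each visits the
 * entries of one table, invokes the callback on every present
 * lower-level table page, and recurses. "last" tells the helper it is
 * walking the final (possibly partial) table at its level, so it can
 * stop at "limit".
 */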
static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
	for (i = 0; i < nr; i++) {
		if (!pmd_none(pmd[i]))
			(*func)(mm, pmd_page(pmd[i]), PT_PTE);
	}
}

static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
	for (i = 0; i < nr; i++) {
		pmd_t *pmd;

		if (pud_none(pud[i]))
			continue;

		pmd = pmd_offset(&pud[i], 0);
		if (PTRS_PER_PMD > 1)
			(*func)(mm, virt_to_page(pmd), PT_PMD);
		xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
	}
}

static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	pud_t *pud;

	if (p4d_none(*p4d))
		return;

	pud = pud_offset(p4d, 0);
	if (PTRS_PER_PUD > 1)
		(*func)(mm, virt_to_page(pud), PT_PUD);
	xen_pud_walk(mm, pud, func, last, limit);
}

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * We must skip the Xen hole in the middle of the address space, just after
 * the big x86-64 virtual hole.
 */
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			   void (*func)(struct mm_struct *mm, struct page *,
					enum pt_level),
			   unsigned long limit)
{
	int i, nr;
	unsigned hole_low = 0, hole_high = 0;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.
	 */
	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
	hole_high = pgd_index(GUARD_HOLE_END_ADDR);

	nr = pgd_index(limit) + 1;
	for (i = 0; i < nr; i++) {
		p4d_t *p4d;

		if (i >= hole_low && i < hole_high)
			continue;

		if (pgd_none(pgd[i]))
			continue;

		p4d = p4d_offset(&pgd[i], 0);
		xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
	}

	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	(*func)(mm, virt_to_page(pgd), PT_PGD);
}

static void xen_pgd_walk(struct mm_struct *mm,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 unsigned long limit)
{
	__xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
	ptl = ptlock_ptr(page_ptdesc(page));
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}
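
/*
 * Queue an mmuext pin/unpin operation (MMUEXT_PIN_L*_TABLE or
 * MMUEXT_UNPIN_TABLE, passed as "level") for the pagetable page at
 * the given pfn.
 */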
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static void xen_pin_page(struct mm_struct *mm, struct page *page,
			 enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);

	if (!pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

	xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
		xen_do_pin(MMUEXT_PIN_L4_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
	}

	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&init_mm.page_table_lock);
	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
	spin_unlock(&init_mm.page_table_lock);
}

static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				   enum pt_level level)
{
	SetPagePinned(page);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now once struct pages for allocated pages are
 * initialized. This happens only after memblock_free_all() is called.
 */
static void __init xen_after_bootmem(void)
{
	static_branch_enable(&xen_struct_pages_ready);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);

	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
		BUG();
}

static void xen_unpin_page(struct mm_struct *mm, struct page *page,
			   enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_do_pin(MMUEXT_UNPIN_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
		xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
	}

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&init_mm.page_table_lock);
	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
	spin_unlock(&init_mm.page_table_lock);
}

static void xen_enter_mmap(struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}
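
/*
 * Run on a cpu that might still reference mm: switch away from mm via
 * leave_mm() if it is the currently loaded mm, and flush any pending
 * multicalls if this cpu's actual cr3 still points at mm's pagetable.
 */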
static void drop_mm_ref_this_cpu(void *info)
{
	struct mm_struct *mm = info;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
		leave_mm();

	/*
	 * If this cpu still has a stale cr3 reference, then make sure
	 * it has been flushed.
	 */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		xen_mc_flush();
}

#ifdef CONFIG_SMP
/*
 * Another cpu may still have their %cr3 pointing at the pagetable, so
 * we need to repoint it somewhere else before we can unpin it.
 */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	drop_mm_ref_this_cpu(mm);

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
		}
		return;
	}

	/*
	 * It's possible that a vcpu may have a stale reference to our
	 * cr3, because it's in lazy mode and hasn't yet flushed its
	 * set of pending hypercalls. In this case, we can look at its
	 * actual current cr3 value, and force it to flush if needed.
	 */
	cpumask_clear(mask);
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	drop_mm_ref_this_cpu(mm);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}
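
/*
 * Clear the kernel high-map (__ka) PMD entries in [vaddr, vaddr_end]
 * that lie outside the kernel image, then flush any queued multicalls
 * so a mistake crashes here rather than somewhere confusing later.
 */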
static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later and be confusing. */
	xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
	void *vaddr = __va(paddr);
	void *vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
		make_lowmem_page_readwrite(vaddr);

	memblock_phys_free(paddr, size);
}
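
/*
 * Unpin (if requested) a single pagetable page that used to map part
 * of the initial P->M table, then hand it back as an ordinary RW page.
 */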
static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

	if (unpin)
		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
	ClearPagePinned(virt_to_page(__va(pa)));
	xen_free_ro_pages(pa, PAGE_SIZE);
}

static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
	unsigned long pa;
	pte_t *pte_tbl;
	int i;

	if (pmd_leaf(*pmd)) {
		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, PMD_SIZE);
		return;
	}

	pte_tbl = pte_offset_kernel(pmd, 0);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (pte_none(pte_tbl[i]))
			continue;
		pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
		xen_free_ro_pages(pa, PAGE_SIZE);
	}
	set_pmd(pmd, __pmd(0));
	xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}

static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
	unsigned long pa;
	pmd_t *pmd_tbl;
	int i;

	if (pud_leaf(*pud)) {
		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, PUD_SIZE);
		return;
	}

	pmd_tbl = pmd_offset(pud, 0);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd_none(pmd_tbl[i]))
			continue;
		xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
	}
	set_pud(pud, __pud(0));
	xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}

static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
	unsigned long pa;
	pud_t *pud_tbl;
	int i;

	if (p4d_leaf(*p4d)) {
		pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, P4D_SIZE);
		return;
	}

	pud_tbl = pud_offset(p4d, 0);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (pud_none(pud_tbl[i]))
			continue;
		xen_cleanmfnmap_pud(pud_tbl + i, unpin);
	}
	set_p4d(p4d, __p4d(0));
	xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}

/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	bool unpin;

	unpin = (vaddr == 2 * PGDIR_SIZE);
	vaddr &= PMD_MASK;
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, 0);
	if (!p4d_none(*p4d))
		xen_cleanmfnmap_p4d(p4d, unpin);
}

static void __init xen_pagetable_p2m_free(void)
{
	unsigned long size;
	unsigned long addr;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

	/* No memory or already called. */
	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
		return;

	/* using __ka address and sticking INVALID_P2M_ENTRY! */
	memset((void *)xen_start_info->mfn_list, 0xff, size);

	addr = xen_start_info->mfn_list;
	/*
	 * We could be in __ka space.
	 * We roundup to the PMD, which means that if anybody at this stage is
	 * using the __ka address of xen_start_info or
	 * xen_start_info->shared_info they are going to crash. Fortunately
	 * we have already revectored in xen_setup_kernel_pagetable.
	 */
	size = roundup(size, PMD_SIZE);

	if (addr >= __START_KERNEL_map) {
		xen_cleanhighmap(addr, addr + size);
		size = PAGE_ALIGN(xen_start_info->nr_pages *
				  sizeof(unsigned long));
		memblock_free((void *)addr, size);
	} else {
		xen_cleanmfnmap(addr);
	}
}

static void __init xen_pagetable_cleanhighmap(void)
{
	unsigned long size;
	unsigned long addr;

	/* At this stage, cleanup_highmap has already cleaned __ka space
	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
	 * the ramdisk). We continue on, erasing PMD entries that point to page
	 * tables - do note that they are accessible at this stage via __va.
	 * As Xen is aligning the memory end to a 4MB boundary, for good
	 * measure we also round up to PMD_SIZE * 2 - which means that if
	 * anybody is using a __ka address to the initial boot-stack and
	 * tries to use it, they are going to crash. The xen_start_info has
	 * been taken care of already in xen_setup_kernel_pagetable. */
	addr = xen_start_info->pt_base;
	size = xen_start_info->nr_pt_frames * PAGE_SIZE;

	xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
}

static void __init xen_pagetable_p2m_setup(void)
{
	xen_vmalloc_p2m_tree();

	xen_pagetable_p2m_free();

	xen_pagetable_cleanhighmap();

	/* And revector! Bye bye old array */
	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}

static void __init xen_pagetable_init(void)
{
	/*
	 * The majority of further PTE writes is to pagetables already
	 * announced as such to Xen. Hence it is more efficient to use
	 * hypercalls for these updates.
	 */
	pv_ops.mmu.set_pte = __xen_set_pte;

	paging_init();
	xen_post_allocator_init();

	xen_pagetable_p2m_setup();

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* Remap memory freed due to conflicts with E820 map */
	xen_remap_memory();
	xen_setup_mfn_list_list();
}

static noinstr void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}
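
/*
 * TLB flushes are done by queueing the corresponding mmuext op and
 * issuing the multicall batch, so several flushes and pagetable
 * updates can be folded into a single hypervisor entry.
 */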
static noinline void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_one_user(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_one_user(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_multi(const struct cpumask *cpus,
				const struct flush_tlb_info *info)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;
	const size_t mc_entry_size = sizeof(args->op) +
		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());

	trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(mc_entry_size);
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove any offline CPUs */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (info->end != TLB_FLUSH_ALL &&
	    (info->end - info->start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = info->start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);
}
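
/*
 * Reads of cr3 are satisfied from the cached per-cpu xen_cr3 value
 * rather than the real (machine-address) cr3; see the note above the
 * definition of xen_cr3.
 */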
static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}
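
/*
 * Queue a MMUEXT_NEW_BASEPTR (kernel) or MMUEXT_NEW_USER_BASEPTR (user)
 * op for the new pagetable base. For the kernel base, xen_cr3 is
 * updated immediately, while xen_current_cr3 is only updated once the
 * batch has actually been submitted to the hypervisor.
 */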
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));

	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

	if (user_pgd)
		__xen_write_cr3(false, __pa(user_pgd));
	else
		__xen_write_cr3(false, 0);

	xen_mc_issue(XEN_LAZY_CPU);  /* interrupts restored */
}

/*
 * At the start of the day - when Xen launches a guest, it has already
 * built pagetables for the guest. We diligently look over them
 * in xen_setup_kernel_pagetable and graft them as appropriate into
 * init_top_pgt and its friends. Then when we are happy we load
 * the new init_top_pgt - and continue on.
 *
 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
 * up the rest of the pagetables. When it has completed it loads the cr3.
 * N.B. that baremetal would start at 'start_kernel' (and the early
 * #PF handler would create bootstrap pagetables) - so we are running
 * with the same assumptions as what to do when write_cr3 is executed
 * at this point.
 *
 * Since there are no user-page tables at all, we have two variants
 * of xen_write_cr3 - the early bootup (this one), and the late one
 * (xen_write_cr3). The reason we have to do that is that in 64-bit
 * the Linux kernel and user-space are both in ring 3 while the
 * hypervisor is in ring 0.
 */
static void __init xen_write_cr3_init(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

	xen_mc_issue(XEN_LAZY_CPU);  /* interrupts restored */
}
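
/*
 * Allocate the companion user pgd page for a freshly allocated kernel
 * pgd and stash it in the pgd page's page->private, from where
 * xen_get_user_pgd() retrieves it. The vsyscall entry is seeded here
 * when vsyscall emulation is configured.
 */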
  1179. static int xen_pgd_alloc(struct mm_struct *mm)
  1180. {
  1181. pgd_t *pgd = mm->pgd;
  1182. struct page *page = virt_to_page(pgd);
  1183. pgd_t *user_pgd;
  1184. int ret = -ENOMEM;
  1185. BUG_ON(PagePinned(virt_to_page(pgd)));
  1186. BUG_ON(page->private != 0);
  1187. user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
  1188. page->private = (unsigned long)user_pgd;
  1189. if (user_pgd != NULL) {
  1190. #ifdef CONFIG_X86_VSYSCALL_EMULATION
  1191. user_pgd[pgd_index(VSYSCALL_ADDR)] =
  1192. __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
  1193. #endif
  1194. ret = 0;
  1195. }
  1196. BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
  1197. return ret;
  1198. }
  1199. static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
  1200. {
  1201. pgd_t *user_pgd = xen_get_user_pgd(pgd);
  1202. if (user_pgd)
  1203. free_page((unsigned long)user_pgd);
  1204. }
  1205. /*
  1206. * Init-time set_pte while constructing initial pagetables, which
  1207. * doesn't allow RO page table pages to be remapped RW.
  1208. *
  1209. * If there is no MFN for this PFN then this page is initially
  1210. * ballooned out so clear the PTE (as in decrease_reservation() in
  1211. * drivers/xen/balloon.c).
  1212. *
  1213. * Many of these PTE updates are done on unpinned and writable pages
  1214. * and doing a hypercall for these is unnecessary and expensive. At
  1215. * this point it is rarely possible to tell if a page is pinned, so
  1216. * mostly write the PTE directly and rely on Xen trapping and
  1217. * emulating any updates as necessary.
  1218. */
  1219. static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
  1220. {
  1221. if (unlikely(is_early_ioremap_ptep(ptep)))
  1222. __xen_set_pte(ptep, pte);
  1223. else
  1224. native_set_pte(ptep, pte);
  1225. }
  1226. __visible pte_t xen_make_pte_init(pteval_t pte)
  1227. {
  1228. unsigned long pfn;
  1229. /*
  1230. * Pages belonging to the initial p2m list mapped outside the default
  1231. * address range must be mapped read-only. This region contains the
  1232. * page tables for mapping the p2m list, too, and page tables MUST be
  1233. * mapped read-only.
  1234. */
  1235. pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
  1236. if (xen_start_info->mfn_list < __START_KERNEL_map &&
  1237. pfn >= xen_start_info->first_p2m_pfn &&
  1238. pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
  1239. pte &= ~_PAGE_RW;
  1240. pte = pte_pfn_to_mfn(pte);
  1241. return native_make_pte(pte);
  1242. }
  1243. PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
  1244. /* Early in boot, while setting up the initial pagetable, assume
  1245. everything is pinned. */
  1246. static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
  1247. {
  1248. #ifdef CONFIG_FLATMEM
  1249. BUG_ON(mem_map); /* should only be used early */
  1250. #endif
  1251. make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
  1252. pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
  1253. }
  1254. /* Used for pmd and pud */
  1255. static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
  1256. {
  1257. #ifdef CONFIG_FLATMEM
  1258. BUG_ON(mem_map); /* should only be used early */
  1259. #endif
  1260. make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
  1261. }
  1262. /* Early release_pte assumes that all pts are pinned, since there's
  1263. only init_mm and anything attached to that is pinned. */
  1264. static void __init xen_release_pte_init(unsigned long pfn)
  1265. {
  1266. pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
  1267. make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
  1268. }
  1269. static void __init xen_release_pmd_init(unsigned long pfn)
  1270. {
  1271. make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
  1272. }
static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

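/*
 * Queue a multicall updating the protection of @pfn's kernel linear
 * mapping; it is issued together with the rest of the current batch.
 */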
static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = xen_page_pinned(mm->pgd);

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		pinned = false;
		if (static_branch_likely(&xen_struct_pages_ready)) {
			pinned = PagePinned(page);
			SetPagePinned(page);
		}

		xen_mc_batch();

		__set_pfn_prot(pfn, PAGE_KERNEL_RO);

		if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
		    !pinned)
			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

		xen_mc_issue(XEN_LAZY_MMU);
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		xen_mc_batch();

		if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

		__set_pfn_prot(pfn, PAGE_KERNEL);

		xen_mc_issue(XEN_LAZY_MMU);

		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void * __init __ka(phys_addr_t paddr)
{
	return (void *)(paddr + __START_KERNEL_map);
}

/* Convert a machine address to physical address */
static unsigned long __init m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= XEN_PTE_MFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void * __init m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on identity-mapped pages */
static void __init set_page_prot_flags(void *addr, pgprot_t prot,
				       unsigned long flags)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
		BUG();
}

static void __init set_page_prot(void *addr, pgprot_t prot)
{
	return set_page_prot_flags(addr, prot, UVMF_NONE);
}

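/*
 * Ask the hypervisor where the machine-to-physical table is mapped; fall
 * back to the fixed-size legacy table if the hypercall is not supported.
 */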
void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
}

static void __init convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

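/*
 * If @addr is the first or the last frame of the initial pagetables, turn
 * it back into a normal writable page and narrow the range of frames that
 * needs to stay reserved.
 */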
static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
				 unsigned long addr)
{
	if (*pt_base == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_base)++;
	}
	if (*pt_end == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_end)--;
	}
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt and level2_kernel_pgt.  This means that only the
 * kernel has a physical mapping to start with - but that's enough to
 * get __va working.  We need to fill in the rest of the physical
 * mapping once some sort of allocator has been set up.
 */
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;
	unsigned long addr[3];
	unsigned long pt_base, pt_end;
	unsigned i;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings. Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	if (xen_start_info->mfn_list < __START_KERNEL_map)
		max_pfn_mapped = xen_start_info->first_p2m_pfn;
	else
		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
	pt_end = pt_base + xen_start_info->nr_pt_frames;

	/* Zap identity mapping */
	init_top_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	/* L4[273] -> level3_ident_pgt  */
	/* L4[511] -> level3_kernel_pgt */
	convert_pfn_mfn(init_top_pgt);

	/* L3_i[0] -> level2_ident_pgt */
	convert_pfn_mfn(level3_ident_pgt);
	/* L3_k[510] -> level2_kernel_pgt */
	/* L3_k[511] -> level2_fixmap_pgt */
	convert_pfn_mfn(level3_kernel_pgt);

	/* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
	convert_pfn_mfn(level2_fixmap_pgt);

	/* We get [511][510] and have Xen's version of level2_kernel_pgt */
  1460. l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
  1461. l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
  1462. addr[0] = (unsigned long)pgd;
  1463. addr[1] = (unsigned long)l3;
  1464. addr[2] = (unsigned long)l2;
  1465. /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
  1466. * Both L4[273][0] and L4[511][510] have entries that point to the same
  1467. * L2 (PMD) tables. Meaning that if you modify it in __va space
  1468. * it will be also modified in the __ka space! (But if you just
  1469. * modify the PMD table to point to other PTE's or none, then you
  1470. * are OK - which is what cleanup_highmap does) */
  1471. copy_page(level2_ident_pgt, l2);
  1472. /* Graft it onto L4[511][510] */
  1473. copy_page(level2_kernel_pgt, l2);
	/*
	 * Zap execute permission from the ident map. Due to the sharing of
	 * L1 entries we need to do this in the L2.
	 */
	if (__supported_pte_mask & _PAGE_NX) {
		for (i = 0; i < PTRS_PER_PMD; ++i) {
			if (pmd_none(level2_ident_pgt[i]))
				continue;
			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
		}
	}

	/* Copy the initial P->M table mappings if necessary. */
	i = pgd_index(xen_start_info->mfn_list);
	if (i && i < pgd_index(__START_KERNEL_map))
		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];

	/* Make pagetable pieces RO */
	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	for (i = 0; i < FIXMAP_PMD_NUM; i++) {
		set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
			      PAGE_KERNEL_RO);
	}

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_top_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Pin user vsyscall L3 */
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
#endif

	/*
	 * At this stage there can be no user pgd, and no page structure to
	 * attach it to, so make sure we just set kernel pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(init_top_pgt));
	xen_mc_issue(XEN_LAZY_CPU);

	/* We can't easily rip out the L3 and L2 pages, as the Xen pagetables
	 * are laid out like this for the initial domain:
	 * [L4], [L1], [L2], [L3], [L1], [L1] ...
	 * For guests started by the toolstack they are in
	 * [L4], [L3], [L2], [L1], [L1] order.  So for dom0 we can only rip
	 * out the [L4] (pgd), but for other guests we shave off three pages.
	 */
	for (i = 0; i < ARRAY_SIZE(addr); i++)
		check_pt_base(&pt_base, &pt_end, addr[i]);

	/* The (up to three pages smaller) Xen pagetable that we are using
	 * from now on. */
	xen_pt_base = PFN_PHYS(pt_base);
	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
	memblock_reserve(xen_pt_base, xen_pt_size);

	/* Revector the xen_start_info */
	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
}

/*
 * Read a value from a physical address.
 */
static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
{
	unsigned long *vaddr;
	unsigned long val;

	vaddr = early_memremap_ro(addr, sizeof(val));
	val = *vaddr;
	early_memunmap(vaddr, sizeof(val));

	return val;
}

/*
 * Translate a virtual address to a physical one without relying on mapped
 * page tables. Don't rely on big pages being aligned in (guest) physical
 * space!
 */
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
	phys_addr_t pa;
	pgd_t pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;

	pa = read_cr3_pa();
	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
						       sizeof(pgd)));
	if (!pgd_present(pgd))
		return 0;

	pa = pgd_val(pgd) & PTE_PFN_MASK;
	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
						       sizeof(pud)));
	if (!pud_present(pud))
		return 0;
	pa = pud_val(pud) & PTE_PFN_MASK;
	if (pud_leaf(pud))
		return pa + (vaddr & ~PUD_MASK);

	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
						       sizeof(pmd)));
	if (!pmd_present(pmd))
		return 0;
	pa = pmd_val(pmd) & PTE_PFN_MASK;
	if (pmd_leaf(pmd))
		return pa + (vaddr & ~PMD_MASK);

	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
						       sizeof(pte)));
	if (!pte_present(pte))
		return 0;
	pa = pte_pfn(pte) << PAGE_SHIFT;

	return pa | (vaddr & ~PAGE_MASK);
}

/*
 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
 * this area.
 */
void __init xen_relocate_p2m(void)
{
	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
	pte_t *pt;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	unsigned long *new_p2m;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
	n_frames = n_pte + n_pt + n_pmd + n_pud;

	new_area = xen_find_free_area(PFN_PHYS(n_frames));
	if (!new_area) {
		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
		BUG();
	}

	/*
	 * Setup the page tables for addressing the new p2m list.
	 * We have asked the hypervisor to map the p2m list at the user address
	 * PUD_SIZE. It may have done so, or it may have used a kernel space
	 * address depending on the Xen version.
	 * To avoid any possible virtual address collision, just use
	 * 2 * PGDIR_SIZE for the new area, matching new_p2m below.
	 */
	pud_phys = new_area;
	pmd_phys = pud_phys + PFN_PHYS(n_pud);
	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;

	pgd = __va(read_cr3_pa());
	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
		pud = early_memremap(pud_phys, PAGE_SIZE);
		clear_page(pud);
		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
		     idx_pmd++) {
			pmd = early_memremap(pmd_phys, PAGE_SIZE);
			clear_page(pmd);
			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
			     idx_pt++) {
				pt = early_memremap(pt_phys, PAGE_SIZE);
				clear_page(pt);
				for (idx_pte = 0;
				     idx_pte < min(n_pte, PTRS_PER_PTE);
				     idx_pte++) {
					pt[idx_pte] = pfn_pte(p2m_pfn,
							      PAGE_KERNEL);
					p2m_pfn++;
				}
				n_pte -= PTRS_PER_PTE;
				early_memunmap(pt, PAGE_SIZE);
				make_lowmem_page_readonly(__va(pt_phys));
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
						  PFN_DOWN(pt_phys));
				pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
				pt_phys += PAGE_SIZE;
			}
			n_pt -= PTRS_PER_PMD;
			early_memunmap(pmd, PAGE_SIZE);
			make_lowmem_page_readonly(__va(pmd_phys));
			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
					  PFN_DOWN(pmd_phys));
			pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
			pmd_phys += PAGE_SIZE;
		}
		n_pmd -= PTRS_PER_PUD;
		early_memunmap(pud, PAGE_SIZE);
		make_lowmem_page_readonly(__va(pud_phys));
		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
		pud_phys += PAGE_SIZE;
	}

	/* Now copy the old p2m info to the new area. */
	memcpy(new_p2m, xen_p2m_addr, size);
	xen_p2m_addr = new_p2m;

	/* Release the old p2m list and set new list info. */
	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
	BUG_ON(!p2m_pfn);
	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);

	if (xen_start_info->mfn_list < __START_KERNEL_map) {
		pfn = xen_start_info->first_p2m_pfn;
		pfn_end = xen_start_info->first_p2m_pfn +
			  xen_start_info->nr_p2m_frames;
		set_pgd(pgd + 1, __pgd(0));
	} else {
		pfn = p2m_pfn;
		pfn_end = p2m_pfn_end;
	}

	memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
	while (pfn < pfn_end) {
		if (pfn == p2m_pfn) {
			pfn = p2m_pfn_end;
			continue;
		}
		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		pfn++;
	}

	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
	xen_start_info->nr_p2m_frames = n_frames;
}

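/*
 * Reserve the pages shared with the hypervisor, so they are not handed to
 * the allocator: the start_info page, the xenstore page and, for domU
 * guests, the console page.
 */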
void __init xen_reserve_special_pages(void)
{
	phys_addr_t paddr;

	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
	if (xen_start_info->store_mfn) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
	if (!xen_initial_domain()) {
		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
		memblock_reserve(paddr, PAGE_SIZE);
	}
}

void __init xen_pt_check_e820(void)
{
	xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
}

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

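/*
 * PV replacement for set_fixmap(): most fixmap slots refer to machine
 * frames, but a few need special treatment, see the cases below.
 */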
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;
	unsigned long vaddr;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	case VSYSCALL_PAGE:
#endif
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, prot);
		break;
	}

	vaddr = __fix_to_virt(idx);
	if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
		BUG();

#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx == VSYSCALL_PAGE)
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
#endif
}

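/*
 * Lazy MMU mode support: while it is active, PTE updates are queued as
 * multicalls and only issued on leave (or an explicit flush).
 */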
static void xen_enter_lazy_mmu(void)
{
	enter_lazy(XEN_LAZY_MMU);
}

static void xen_flush_lazy_mmu(void)
{
	preempt_disable();

	if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

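/*
 * Once the kernel allocators are up, switch from the init-time pagetable
 * operations to the final ones.
 */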
static void __init xen_post_allocator_init(void)
{
	pv_ops.mmu.set_pte = xen_set_pte;
	pv_ops.mmu.set_pmd = xen_set_pmd;
	pv_ops.mmu.set_pud = xen_set_pud;
	pv_ops.mmu.set_p4d = xen_set_p4d;

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_ops.mmu.alloc_pte = xen_alloc_pte;
	pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
	pv_ops.mmu.release_pte = xen_release_pte;
	pv_ops.mmu.release_pmd = xen_release_pmd;
	pv_ops.mmu.alloc_pud = xen_alloc_pud;
	pv_ops.mmu.release_pud = xen_release_pud;
	pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);

	pv_ops.mmu.write_cr3 = &xen_write_cr3;
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	leave_lazy(XEN_LAZY_MMU);
	preempt_enable();
}

static const typeof(pv_ops) xen_mmu_ops __initconst = {
	.mmu = {
		.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
		.write_cr2 = xen_write_cr2,

		.read_cr3 = xen_read_cr3,
		.write_cr3 = xen_write_cr3_init,

		.flush_tlb_user = xen_flush_tlb,
		.flush_tlb_kernel = xen_flush_tlb,
		.flush_tlb_one_user = xen_flush_tlb_one_user,
		.flush_tlb_multi = xen_flush_tlb_multi,
		.tlb_remove_table = tlb_remove_table,

		.pgd_alloc = xen_pgd_alloc,
		.pgd_free = xen_pgd_free,

		.alloc_pte = xen_alloc_pte_init,
		.release_pte = xen_release_pte_init,
		.alloc_pmd = xen_alloc_pmd_init,
		.release_pmd = xen_release_pmd_init,

		.set_pte = xen_set_pte_init,
		.set_pmd = xen_set_pmd_hyper,

		.ptep_modify_prot_start = xen_ptep_modify_prot_start,
		.ptep_modify_prot_commit = xen_ptep_modify_prot_commit,

		.pte_val = PV_CALLEE_SAVE(xen_pte_val),
		.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

		.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
		.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

		.set_pud = xen_set_pud_hyper,

		.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
		.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

		.pud_val = PV_CALLEE_SAVE(xen_pud_val),
		.make_pud = PV_CALLEE_SAVE(xen_make_pud),

		.set_p4d = xen_set_p4d_hyper,

		.alloc_pud = xen_alloc_pmd_init,
		.release_pud = xen_release_pmd_init,

#if CONFIG_PGTABLE_LEVELS >= 5
		.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
		.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
#endif

		.enter_mmap = xen_enter_mmap,
		.exit_mmap = xen_exit_mmap,

		.lazy_mode = {
			.enter = xen_enter_lazy_mmu,
			.leave = xen_leave_lazy_mmu,
			.flush = xen_flush_lazy_mmu,
		},

		.set_fixmap = xen_set_fixmap,
	},
};

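/* Install the PV MMU operations and the pagetable setup callbacks. */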
void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;
	x86_init.hyper.init_after_bootmem = xen_after_bootmem;

	pv_ops.mmu = xen_mmu_ops.mmu;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

#define VOID_PTE (mfn_pte(0, __pgprot(0)))

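/*
 * Replace the PTEs of a 1 << order page range by VOID_PTE and invalidate
 * the pfns in the p2m, optionally recording the old MFNs (@in_frames)
 * and/or the zapped PFNs (@out_frames).
 */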
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL << order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn((void *)vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn((void *)vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	unsigned long *in_frames, out_frame;
	unsigned long flags;
	int success;
	unsigned long vstart = (unsigned long)phys_to_virt(pstart);

	if (unlikely(order > discontig_frames_order)) {
		if (!discontig_frames_dyn)
			return -ENOMEM;

		if (alloc_discontig_frames(order))
			return -ENOMEM;
	}

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	in_frames = discontig_frames;

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn((void *)vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	*dma_handle = virt_to_machine(vstart).maddr;
	return success ? 0 : -ENOMEM;
}

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	unsigned long *out_frames, in_frame;
	unsigned long flags;
	int success;
	unsigned long vstart;

	if (unlikely(order > discontig_frames_order))
		return;

	vstart = (unsigned long)phys_to_virt(pstart);

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	out_frames = discontig_frames;

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn((void *)vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}

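/* Flush the TLBs of all processors with a single MMUEXT_TLB_FLUSH_ALL op. */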
static noinline void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

#define REMAP_BATCH_SIZE 16

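/*
 * State shared between xen_remap_pfn() and the per-PTE callback below: the
 * frame(s) to map, the protection to use, and a cursor into the pending
 * mmu_update batch.
 */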
struct remap_data {
	xen_pfn_t *pfn;
	bool contiguous;
	bool no_translate;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the pfn itself,
	 * else update pointer to be "next pfn".
	 */
	if (rmd->contiguous)
		(*rmd->pfn)++;
	else
		rmd->pfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->ptr |= rmd->no_translate ?
		MMU_PT_UPDATE_NO_TRANSLATE :
		MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
		  unsigned int domid, bool no_translate)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.pfn = pfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping.
	 */
	rmd.contiguous = !err_ptr;
	rmd.no_translate = no_translate;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_pfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @pfn, so
			 * only clear it after each chunk of @pfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
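				/*
				 * The loop above left i == index + done,
				 * i.e. pointing at the frame that failed.
				 */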
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}
EXPORT_SYMBOL_GPL(xen_remap_pfn);

#ifdef CONFIG_VMCORE_INFO
phys_addr_t paddr_vmcoreinfo_note(void)
{
	if (xen_pv_domain())
		return virt_to_machine(vmcoreinfo_note).maddr;
	else
		return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_VMCORE_INFO */