panthor_mmu.c

  1. // SPDX-License-Identifier: GPL-2.0 or MIT
  2. /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
  3. /* Copyright 2023 Collabora ltd. */
  4. #include <drm/drm_debugfs.h>
  5. #include <drm/drm_drv.h>
  6. #include <drm/drm_exec.h>
  7. #include <drm/drm_gpuvm.h>
  8. #include <drm/drm_managed.h>
  9. #include <drm/gpu_scheduler.h>
  10. #include <drm/panthor_drm.h>
  11. #include <linux/atomic.h>
  12. #include <linux/bitfield.h>
  13. #include <linux/delay.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/io.h>
  17. #include <linux/iopoll.h>
  18. #include <linux/io-pgtable.h>
  19. #include <linux/iommu.h>
  20. #include <linux/kmemleak.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/rwsem.h>
  24. #include <linux/sched.h>
  25. #include <linux/shmem_fs.h>
  26. #include <linux/sizes.h>
  27. #include "panthor_device.h"
  28. #include "panthor_gem.h"
  29. #include "panthor_heap.h"
  30. #include "panthor_mmu.h"
  31. #include "panthor_regs.h"
  32. #include "panthor_sched.h"
  33. #define MAX_AS_SLOTS 32
  34. struct panthor_vm;
  35. /**
  36. * struct panthor_as_slot - Address space slot
  37. */
  38. struct panthor_as_slot {
  39. /** @vm: VM bound to this slot. NULL if no VM is bound. */
  40. struct panthor_vm *vm;
  41. };
  42. /**
  43. * struct panthor_mmu - MMU related data
  44. */
  45. struct panthor_mmu {
  46. /** @irq: The MMU irq. */
  47. struct panthor_irq irq;
  48. /** @as: Address space related fields.
  49. *
  50. * The GPU has a limited number of address space (AS) slots, forcing
  51. * us to re-assign them on-demand.
  52. */
  53. struct {
  54. /** @slots_lock: Lock protecting access to all other AS fields. */
  55. struct mutex slots_lock;
  56. /** @alloc_mask: Bitmask encoding the allocated slots. */
  57. unsigned long alloc_mask;
  58. /** @faulty_mask: Bitmask encoding the faulty slots. */
  59. unsigned long faulty_mask;
  60. /** @slots: VMs currently bound to the AS slots. */
  61. struct panthor_as_slot slots[MAX_AS_SLOTS];
  62. /**
  63. * @lru_list: List of least recently used VMs.
  64. *
  65. * We use this list to pick a VM to evict when all slots are
  66. * used.
  67. *
  68. * There should be no more active VMs than there are AS slots,
  69. * so this LRU is just here to keep VMs bound until there's
  70. * a need to release a slot, thus avoiding unnecessary TLB/cache
  71. * flushes.
  72. */
  73. struct list_head lru_list;
  74. } as;
  75. /** @vm: VM management fields */
  76. struct {
  77. /** @lock: Lock protecting access to @list. */
  78. struct mutex lock;
  79. /** @list: List containing all VMs. */
  80. struct list_head list;
  81. /** @reset_in_progress: True if a reset is in progress. */
  82. bool reset_in_progress;
  83. /** @wq: Workqueue used for the VM_BIND queues. */
  84. struct workqueue_struct *wq;
  85. } vm;
  86. };
  87. /**
  88. * struct panthor_vm_pool - VM pool object
  89. */
  90. struct panthor_vm_pool {
  91. /** @xa: Array used for VM handle tracking. */
  92. struct xarray xa;
  93. };
  94. /**
  95. * struct panthor_vma - GPU mapping object
  96. *
  97. * This is used to track GEM mappings in GPU space.
  98. */
  99. struct panthor_vma {
  100. /** @base: Inherits from drm_gpuva. */
  101. struct drm_gpuva base;
  102. /** @node: Used to implement deferred release of VMAs. */
  103. struct list_head node;
  104. /**
  105. * @flags: Combination of drm_panthor_vm_bind_op_flags.
  106. *
  107. * Only map related flags are accepted.
  108. */
  109. u32 flags;
  110. };
  111. /**
  112. * struct panthor_vm_op_ctx - VM operation context
  113. *
  114. * With VM operations potentially taking place in a dma-signaling path, we
  115. * need to make sure everything that might require resource allocation is
  116. * pre-allocated upfront. This is what this operation context is for.
  117. *
  118. * We also collect resources that have been freed, so we can release them
  119. * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
  120. * request.
  121. */
  122. struct panthor_vm_op_ctx {
  123. /** @rsvd_page_tables: Pages reserved for the MMU page table update. */
  124. struct {
  125. /** @count: Number of pages reserved. */
  126. u32 count;
  127. /** @ptr: Points to the first unused page in the @pages table. */
  128. u32 ptr;
  129. /**
  130. * @pages: Array of pages that can be used for an MMU page table update.
  131. *
  132. * After a VM operation, there might be free pages left in this array.
  133. * They should be returned to the pt_cache as part of the op_ctx cleanup.
  134. */
  135. void **pages;
  136. } rsvd_page_tables;
  137. /**
  138. * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
  139. *
  140. * Partial unmap requests or map requests overlapping existing mappings will
  141. * trigger a remap call, which needs to register up to three panthor_vma objects
  142. * (one for the new mapping, and two for the previous and next mappings).
  143. */
  144. struct panthor_vma *preallocated_vmas[3];
  145. /** @flags: Combination of drm_panthor_vm_bind_op_flags. */
  146. u32 flags;
  147. /** @va: Virtual range targeted by the VM operation. */
  148. struct {
  149. /** @addr: Start address. */
  150. u64 addr;
  151. /** @range: Range size. */
  152. u64 range;
  153. } va;
  154. /**
  155. * @returned_vmas: List of panthor_vma objects returned after a VM operation.
  156. *
  157. * For unmap operations, this will contain all VMAs that were covered by the
  158. * specified VA range.
  159. *
  160. * For map operations, this will contain all VMAs that previously mapped to
  161. * the specified VA range.
  162. *
  163. * Those VMAs, and the resources they point to will be released as part of
  164. * the op_ctx cleanup operation.
  165. */
  166. struct list_head returned_vmas;
  167. /** @map: Fields specific to a map operation. */
  168. struct {
  169. /** @vm_bo: Buffer object to map. */
  170. struct drm_gpuvm_bo *vm_bo;
  171. /** @bo_offset: Offset in the buffer object. */
  172. u64 bo_offset;
  173. /**
  174. * @sgt: sg-table pointing to pages backing the GEM object.
  175. *
  176. * This is gathered at job creation time, such that we don't have
  177. * to allocate in ::run_job().
  178. */
  179. struct sg_table *sgt;
  180. /**
  181. * @new_vma: The new VMA object that will be inserted to the VA tree.
  182. */
  183. struct panthor_vma *new_vma;
  184. } map;
  185. };
  186. /**
  187. * struct panthor_vm - VM object
  188. *
  189. * A VM is an object representing a GPU (or MCU) virtual address space.
  190. * It embeds the MMU page table for this address space, a tree containing
  191. * all the virtual mappings of GEM objects, and other things needed to manage
  192. * the VM.
  193. *
  194. * Except for the MCU VM, which is managed by the kernel, all other VMs are
  195. * created by userspace and mostly managed by userspace, using the
  196. * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
  197. *
  198. * A portion of the virtual address space is reserved for kernel objects,
  199. * like heap chunks, and userspace gets to decide how much of the virtual
  200. * address space is left to the kernel (half of the virtual address space
  201. * by default).
  202. */
  203. struct panthor_vm {
  204. /**
  205. * @base: Inherit from drm_gpuvm.
  206. *
  207. * We delegate all the VA management to the common drm_gpuvm framework
  208. * and only implement hooks to update the MMU page table.
  209. */
  210. struct drm_gpuvm base;
  211. /**
  212. * @sched: Scheduler used for asynchronous VM_BIND requests.
  213. *
  214. * We use a 1:1 scheduler here.
  215. */
  216. struct drm_gpu_scheduler sched;
  217. /**
  218. * @entity: Scheduling entity representing the VM_BIND queue.
  219. *
  220. * There's currently one bind queue per VM. It doesn't make sense to
  221. * allow more given the VM operations are serialized anyway.
  222. */
  223. struct drm_sched_entity entity;
  224. /** @ptdev: Device. */
  225. struct panthor_device *ptdev;
  226. /** @memattr: Value to program to the AS_MEMATTR register. */
  227. u64 memattr;
  228. /** @pgtbl_ops: Page table operations. */
  229. struct io_pgtable_ops *pgtbl_ops;
  230. /** @root_page_table: Stores the root page table pointer. */
  231. void *root_page_table;
  232. /**
  233. * @op_lock: Lock used to serialize operations on a VM.
  234. *
  235. * The serialization of jobs queued to the VM_BIND queue is already
  236. * taken care of by drm_sched, but we need to serialize synchronous
  237. * and asynchronous VM_BIND requests. This is what this lock is for.
  238. */
  239. struct mutex op_lock;
  240. /**
  241. * @op_ctx: The context attached to the currently executing VM operation.
  242. *
  243. * NULL when no operation is in progress.
  244. */
  245. struct panthor_vm_op_ctx *op_ctx;
  246. /**
  247. * @mm: Memory management object representing the auto-VA/kernel-VA.
  248. *
  249. * Used to auto-allocate VA space for kernel-managed objects (tiler
  250. * heaps, ...).
  251. *
  252. * For the MCU VM, this is managing the VA range that's used to map
  253. * all shared interfaces.
  254. *
  255. * For user VMs, the range is specified by userspace, and must not
  256. * exceed half of the addressable VA space.
  257. */
  258. struct drm_mm mm;
  259. /** @mm_lock: Lock protecting the @mm field. */
  260. struct mutex mm_lock;
  261. /** @kernel_auto_va: Automatic VA-range for kernel BOs. */
  262. struct {
  263. /** @start: Start of the automatic VA-range for kernel BOs. */
  264. u64 start;
  265. /** @end: End of the automatic VA-range for kernel BOs. */
  266. u64 end;
  267. } kernel_auto_va;
  268. /** @as: Address space related fields. */
  269. struct {
  270. /**
  271. * @id: ID of the address space this VM is bound to.
  272. *
  273. * A value of -1 means the VM is inactive/not bound.
  274. */
  275. int id;
  276. /** @active_cnt: Number of active users of this VM. */
  277. refcount_t active_cnt;
  278. /**
  279. * @lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
  280. *
  281. * Active VMs should not be inserted in the LRU list.
  282. */
  283. struct list_head lru_node;
  284. } as;
  285. /**
  286. * @heaps: Tiler heap related fields.
  287. */
  288. struct {
  289. /**
  290. * @pool: The heap pool attached to this VM.
  291. *
  292. * Will stay NULL until someone creates a heap context on this VM.
  293. */
  294. struct panthor_heap_pool *pool;
  295. /** @lock: Lock used to protect access to @pool. */
  296. struct mutex lock;
  297. } heaps;
  298. /** @node: Used to insert the VM in the panthor_mmu::vm::list. */
  299. struct list_head node;
  300. /** @for_mcu: True if this is the MCU VM. */
  301. bool for_mcu;
  302. /**
  303. * @destroyed: True if the VM was destroyed.
  304. *
  305. * No further bind requests should be queued to a destroyed VM.
  306. */
  307. bool destroyed;
  308. /**
  309. * @unusable: True if the VM has turned unusable because something
  310. * bad happened during an asynchronous request.
  311. *
  312. * We don't try to recover from such failures, because this implies
  313. * informing userspace about the specific operation that failed, and
  314. * hoping the userspace driver can replay things from there. This all
  315. * sounds very complicated for little gain.
  316. *
  317. * Instead, we should just flag the VM as unusable, and fail any
  318. * further request targeting this VM.
  319. *
  320. * We also provide a way to query a VM state, so userspace can destroy
  321. * it and create a new one.
  322. *
  323. * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
  324. * situation, where the logical device needs to be re-created.
  325. */
  326. bool unusable;
  327. /**
  328. * @unhandled_fault: Unhandled fault happened.
  329. *
  330. * This should be reported to the scheduler, and the queue/group be
  331. * flagged as faulty as a result.
  332. */
  333. bool unhandled_fault;
  334. };
  335. /**
  336. * struct panthor_vm_bind_job - VM bind job
  337. */
  338. struct panthor_vm_bind_job {
  339. /** @base: Inherit from drm_sched_job. */
  340. struct drm_sched_job base;
  341. /** @refcount: Reference count. */
  342. struct kref refcount;
  343. /** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
  344. struct work_struct cleanup_op_ctx_work;
  345. /** @vm: VM targeted by the VM operation. */
  346. struct panthor_vm *vm;
  347. /** @ctx: Operation context. */
  348. struct panthor_vm_op_ctx ctx;
  349. };
  350. /**
  351. * @pt_cache: Cache used to allocate MMU page tables.
  352. *
  353. * The pre-allocation pattern forces us to over-allocate to plan for
  354. * the worst case scenario, and return the pages we didn't use.
  355. *
  356. * Having a kmem_cache allows us to speed up allocations.
  357. */
  358. static struct kmem_cache *pt_cache;
  359. /**
  360. * alloc_pt() - Custom page table allocator
  361. * @cookie: Cookie passed at page table allocation time.
  362. * @size: Size of the page table. This size should be fixed,
  363. * and determined at creation time based on the granule size.
  364. * @gfp: GFP flags.
  365. *
  366. * We want a custom allocator so we can use a cache for page table
  367. * allocations and amortize the cost of the over-reservation that's
  368. * done to allow asynchronous VM operations.
  369. *
  370. * Return: non-NULL on success, NULL if the allocation failed for any
  371. * reason.
  372. */
  373. static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
  374. {
  375. struct panthor_vm *vm = cookie;
  376. void *page;
  377. /* Allocation of the root page table happens during init. */
  378. if (unlikely(!vm->root_page_table)) {
  379. struct page *p;
  380. drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
  381. p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
  382. gfp | __GFP_ZERO, get_order(size));
  383. page = p ? page_address(p) : NULL;
  384. vm->root_page_table = page;
  385. return page;
  386. }
  387. /* We're not supposed to have anything bigger than 4k here, because we picked a
  388. * 4k granule size at init time.
  389. */
  390. if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
  391. return NULL;
  392. /* We must have some op_ctx attached to the VM and it must have at least one
  393. * free page.
  394. */
  395. if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
  396. drm_WARN_ON(&vm->ptdev->base,
  397. vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
  398. return NULL;
  399. page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
  400. memset(page, 0, SZ_4K);
  401. /* Page table entries don't use virtual addresses, which trips up
  402. * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
  403. * are mixed with other fields, and I fear kmemleak won't detect that
  404. * either.
  405. *
  406. * Let's just ignore memory passed to the page-table driver for now.
  407. */
  408. kmemleak_ignore(page);
  409. return page;
  410. }
  411. /**
  412. * free_pt() - Custom page table free function
  413. * @cookie: Cookie passed at page table allocation time.
  414. * @data: Page table to free.
  415. * @size: Size of the page table. This size should be fixed,
  416. * and determined at creation time based on the granule size.
  417. */
  418. static void free_pt(void *cookie, void *data, size_t size)
  419. {
  420. struct panthor_vm *vm = cookie;
  421. if (unlikely(vm->root_page_table == data)) {
  422. free_pages((unsigned long)data, get_order(size));
  423. vm->root_page_table = NULL;
  424. return;
  425. }
  426. if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
  427. return;
  428. /* Return the page to the pt_cache. */
  429. kmem_cache_free(pt_cache, data);
  430. }
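The pattern behind alloc_pt()/free_pt() is "reserve the worst case up front, consume through a cursor in the non-allocating path, return the leftovers during op_ctx cleanup". A minimal userspace sketch of that pattern follows; the struct and helper names are invented for illustration, and malloc()/free() stand in for the pt_cache bulk APIs.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for panthor_vm_op_ctx::rsvd_page_tables. */
struct rsvd_pages {
        unsigned int count;     /* number of pages reserved */
        unsigned int ptr;       /* first unused page */
        void **pages;
};

static int reserve(struct rsvd_pages *r, unsigned int worst_case)
{
        r->pages = calloc(worst_case, sizeof(*r->pages));
        if (!r->pages)
                return -1;

        /* Stand-in for kmem_cache_alloc_bulk() on pt_cache. */
        for (r->count = 0; r->count < worst_case; r->count++) {
                r->pages[r->count] = malloc(4096);
                if (!r->pages[r->count])
                        return -1;
        }
        r->ptr = 0;
        return 0;
}

/* What alloc_pt() does in the non-root case: hand out the next reserved page. */
static void *take(struct rsvd_pages *r)
{
        return r->ptr < r->count ? r->pages[r->ptr++] : NULL;
}

/* What the op_ctx cleanup does: give unused pages back to the cache. */
static void release_unused(struct rsvd_pages *r)
{
        while (r->ptr < r->count)
                free(r->pages[r->ptr++]);       /* stand-in for kmem_cache_free_bulk() */
        free(r->pages);
}

int main(void)
{
        struct rsvd_pages r;
        void *pt;

        if (reserve(&r, 8))
                return 1;

        pt = take(&r);          /* page consumed as an MMU page table */
        printf("used %p, %u pages returned unused\n", pt, r.count - r.ptr);

        release_unused(&r);     /* leftovers go back */
        free(pt);               /* the consumed page is owned by its user now */
        return 0;
}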
  431. static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
  432. {
  433. int ret;
  434. u32 val;
  435. /* Wait for the MMU status to indicate there is no active command, in
  436. * case one is pending.
  437. */
  438. ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
  439. val, !(val & AS_STATUS_AS_ACTIVE),
  440. 10, 100000);
  441. if (ret) {
  442. panthor_device_schedule_reset(ptdev);
  443. drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
  444. }
  445. return ret;
  446. }
  447. static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
  448. {
  449. int status;
  450. /* write AS_COMMAND when MMU is ready to accept another command */
  451. status = wait_ready(ptdev, as_nr);
  452. if (!status)
  453. gpu_write(ptdev, AS_COMMAND(as_nr), cmd);
  454. return status;
  455. }
  456. static void lock_region(struct panthor_device *ptdev, u32 as_nr,
  457. u64 region_start, u64 size)
  458. {
  459. u8 region_width;
  460. u64 region;
  461. u64 region_end = region_start + size;
  462. if (!size)
  463. return;
  464. /*
  465. * The locked region is a naturally aligned power of 2 block encoded as
  466. * its log2 minus 1.
  467. * Calculate the desired start/end and look for the highest bit which
  468. * differs. The smallest naturally aligned block must include this bit
  469. * change, the desired region starts with this bit (and subsequent bits)
  470. * zeroed and ends with the bit (and subsequent bits) set to one.
  471. */
  472. region_width = max(fls64(region_start ^ (region_end - 1)),
  473. const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
  474. /*
  475. * Mask off the low bits of region_start (which would be ignored by
  476. * the hardware anyway)
  477. */
  478. region_start &= GENMASK_ULL(63, region_width);
  479. region = region_width | region_start;
  480. /* Lock the region that needs to be updated */
  481. gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
  482. gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
  483. write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
  484. }
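The lock-region encoding above can be reproduced in a standalone program. This sketch assumes a 32 KiB minimum lock region (the real value of AS_LOCK_REGION_MIN_SIZE lives in panthor_regs.h) and uses compiler builtins in place of fls64() and GENMASK_ULL(); the sample range is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define LOCK_REGION_MIN_SIZE (1ULL << 15)       /* assumption: 32 KiB */

static unsigned int fls64_sketch(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        uint64_t region_start = 0x100f000, size = 0x3000;
        uint64_t region_end = region_start + size;
        unsigned int min_width = 63 - __builtin_clzll(LOCK_REGION_MIN_SIZE);    /* const_ilog2() */
        unsigned int region_width;
        uint64_t region;

        /* Highest bit differing between start and end - 1 picks the block size. */
        region_width = fls64_sketch(region_start ^ (region_end - 1));
        if (region_width < min_width)
                region_width = min_width;
        region_width -= 1;

        /* Clear the low bits of the start (GENMASK_ULL(63, region_width)) and
         * fold the width into the low bits of the lock address. */
        region_start &= ~((1ULL << region_width) - 1);
        region = region_width | region_start;

        printf("AS_LOCKADDR=%#llx -> %llu-byte block\n",
               (unsigned long long)region, 1ULL << (region_width + 1));
        return 0;
}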
  485. static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
  486. u64 iova, u64 size, u32 op)
  487. {
  488. lockdep_assert_held(&ptdev->mmu->as.slots_lock);
  489. if (as_nr < 0)
  490. return 0;
  491. /*
  492. * If the AS number is valid (>= 0), then we can be sure
  493. * the device is up and running, so we don't need to explicitly
  494. * power it up
  495. */
  496. if (op != AS_COMMAND_UNLOCK)
  497. lock_region(ptdev, as_nr, iova, size);
  498. /* Run the MMU operation */
  499. write_cmd(ptdev, as_nr, op);
  500. /* Wait for the flush to complete */
  501. return wait_ready(ptdev, as_nr);
  502. }
  503. static int mmu_hw_do_operation(struct panthor_vm *vm,
  504. u64 iova, u64 size, u32 op)
  505. {
  506. struct panthor_device *ptdev = vm->ptdev;
  507. int ret;
  508. mutex_lock(&ptdev->mmu->as.slots_lock);
  509. ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op);
  510. mutex_unlock(&ptdev->mmu->as.slots_lock);
  511. return ret;
  512. }
  513. static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
  514. u64 transtab, u64 transcfg, u64 memattr)
  515. {
  516. int ret;
  517. ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
  518. if (ret)
  519. return ret;
  520. gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
  521. gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
  522. gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
  523. gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
  524. gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
  525. gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
  526. return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
  527. }
  528. static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
  529. {
  530. int ret;
  531. ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
  532. if (ret)
  533. return ret;
  534. gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
  535. gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
  536. gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
  537. gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
  538. gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
  539. gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
  540. return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
  541. }
  542. static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value)
  543. {
  544. /* Bits 16 to 31 mean REQ_COMPLETE. */
  545. return value & GENMASK(15, 0);
  546. }
  547. static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
  548. {
  549. return BIT(as);
  550. }
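A quick userspace illustration of how the two mask helpers above slice an MMU interrupt status word; the status value is hypothetical.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical MMU_INT_STAT value: fault on AS 3 plus one REQ_COMPLETE bit. */
        uint32_t status = (1u << 3) | (1u << 19);
        uint32_t faults = status & 0xffff;      /* panthor_mmu_fault_mask(): drop bits 16-31 */

        for (unsigned int as = 0; as < 16; as++) {
                if (faults & (1u << as))        /* panthor_mmu_as_fault_mask(ptdev, as) */
                        printf("fault pending on AS %u\n", as);
        }
        return 0;
}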
  551. /**
  552. * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
  553. * @vm: VM to check.
  554. *
  555. * Return: true if the VM has unhandled faults, false otherwise.
  556. */
  557. bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
  558. {
  559. return vm->unhandled_fault;
  560. }
  561. /**
  562. * panthor_vm_is_unusable() - Check if the VM is unusable
  563. * @vm: VM to check.
  564. *
  565. * Return: true if the VM is unusable, false otherwise.
  566. */
  567. bool panthor_vm_is_unusable(struct panthor_vm *vm)
  568. {
  569. return vm->unusable;
  570. }
  571. static void panthor_vm_release_as_locked(struct panthor_vm *vm)
  572. {
  573. struct panthor_device *ptdev = vm->ptdev;
  574. lockdep_assert_held(&ptdev->mmu->as.slots_lock);
  575. if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
  576. return;
  577. ptdev->mmu->as.slots[vm->as.id].vm = NULL;
  578. clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
  579. refcount_set(&vm->as.active_cnt, 0);
  580. list_del_init(&vm->as.lru_node);
  581. vm->as.id = -1;
  582. }
  583. /**
  584. * panthor_vm_active() - Flag a VM as active
  585. * @vm: VM to flag as active.
  586. *
  587. * Assigns an address space to a VM so it can be used by the GPU/MCU.
  588. *
  589. * Return: 0 on success, a negative error code otherwise.
  590. */
  591. int panthor_vm_active(struct panthor_vm *vm)
  592. {
  593. struct panthor_device *ptdev = vm->ptdev;
  594. u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
  595. struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
  596. int ret = 0, as, cookie;
  597. u64 transtab, transcfg;
  598. if (!drm_dev_enter(&ptdev->base, &cookie))
  599. return -ENODEV;
  600. if (refcount_inc_not_zero(&vm->as.active_cnt))
  601. goto out_dev_exit;
  602. mutex_lock(&ptdev->mmu->as.slots_lock);
  603. if (refcount_inc_not_zero(&vm->as.active_cnt))
  604. goto out_unlock;
  605. as = vm->as.id;
  606. if (as >= 0) {
  607. /* Unhandled pagefault on this AS, the MMU was disabled. We need to
  608. * re-enable the MMU after clearing+unmasking the AS interrupts.
  609. */
  610. if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
  611. goto out_enable_as;
  612. goto out_make_active;
  613. }
  614. /* Check for a free AS */
  615. if (vm->for_mcu) {
  616. drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
  617. as = 0;
  618. } else {
  619. as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
  620. }
  621. if (!(BIT(as) & ptdev->gpu_info.as_present)) {
  622. struct panthor_vm *lru_vm;
  623. lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
  624. struct panthor_vm,
  625. as.lru_node);
  626. if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
  627. ret = -EBUSY;
  628. goto out_unlock;
  629. }
  630. drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
  631. as = lru_vm->as.id;
  632. panthor_vm_release_as_locked(lru_vm);
  633. }
  634. /* Assign the free or reclaimed AS to the FD */
  635. vm->as.id = as;
  636. set_bit(as, &ptdev->mmu->as.alloc_mask);
  637. ptdev->mmu->as.slots[as].vm = vm;
  638. out_enable_as:
  639. transtab = cfg->arm_lpae_s1_cfg.ttbr;
  640. transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
  641. AS_TRANSCFG_PTW_RA |
  642. AS_TRANSCFG_ADRMODE_AARCH64_4K |
  643. AS_TRANSCFG_INA_BITS(55 - va_bits);
  644. if (ptdev->coherent)
  645. transcfg |= AS_TRANSCFG_PTW_SH_OS;
  646. /* If the VM is re-activated, we clear the fault. */
  647. vm->unhandled_fault = false;
  648. /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
  649. * before enabling the AS.
  650. */
  651. if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
  652. gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
  653. ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
  654. ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
  655. gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
  656. }
  657. ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);
  658. out_make_active:
  659. if (!ret) {
  660. refcount_set(&vm->as.active_cnt, 1);
  661. list_del_init(&vm->as.lru_node);
  662. }
  663. out_unlock:
  664. mutex_unlock(&ptdev->mmu->as.slots_lock);
  665. out_dev_exit:
  666. drm_dev_exit(cookie);
  667. return ret;
  668. }
  669. /**
  670. * panthor_vm_idle() - Flag a VM idle
  671. * @vm: VM to flag as idle.
  672. *
  673. * When we know the GPU is done with the VM (no more jobs to process),
  674. * we can relinquish the AS slot attached to this VM, if any.
  675. *
  676. * We don't release the slot immediately, but instead place the VM in
  677. * the LRU list, so it can be evicted if another VM needs an AS slot.
  678. * This way, VMs stay attached to the AS they were given until we run
  679. * out of free slots, limiting the number of MMU operations (TLB flush
  680. * and other AS updates).
  681. */
  682. void panthor_vm_idle(struct panthor_vm *vm)
  683. {
  684. struct panthor_device *ptdev = vm->ptdev;
  685. if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
  686. return;
  687. if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
  688. list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);
  689. refcount_set(&vm->as.active_cnt, 0);
  690. mutex_unlock(&ptdev->mmu->as.slots_lock);
  691. }
  692. u32 panthor_vm_page_size(struct panthor_vm *vm)
  693. {
  694. const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
  695. u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
  696. return 1u << pg_shift;
  697. }
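The page-size derivation above is just "lowest set bit of the pgsize_bitmap"; a tiny standalone equivalent, with a hypothetical bitmap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical pgsize_bitmap advertising 4K, 2M and 1G pages. */
        uint32_t pgsize_bitmap = 0x1000 | 0x200000 | 0x40000000;
        uint32_t pg_shift = __builtin_ffs(pgsize_bitmap) - 1;  /* ffs() equivalent */

        printf("VM page size: %u bytes\n", 1u << pg_shift);    /* prints 4096 */
        return 0;
}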
  698. static void panthor_vm_stop(struct panthor_vm *vm)
  699. {
  700. drm_sched_stop(&vm->sched, NULL);
  701. }
  702. static void panthor_vm_start(struct panthor_vm *vm)
  703. {
  704. drm_sched_start(&vm->sched);
  705. }
  706. /**
  707. * panthor_vm_as() - Get the AS slot attached to a VM
  708. * @vm: VM to get the AS slot of.
  709. *
  710. * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
  711. */
  712. int panthor_vm_as(struct panthor_vm *vm)
  713. {
  714. return vm->as.id;
  715. }
  716. static size_t get_pgsize(u64 addr, size_t size, size_t *count)
  717. {
  718. /*
  719. * io-pgtable only operates on multiple pages within a single table
  720. * entry, so we need to split at boundaries of the table size, i.e.
  721. * the next block size up. The distance from address A to the next
  722. * boundary of block size B is logically B - A % B, but in unsigned
  723. * two's complement where B is a power of two we get the equivalence
  724. * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
  725. */
  726. size_t blk_offset = -addr % SZ_2M;
  727. if (blk_offset || size < SZ_2M) {
  728. *count = min_not_zero(blk_offset, size) / SZ_4K;
  729. return SZ_4K;
  730. }
  731. blk_offset = -addr % SZ_1G ?: SZ_1G;
  732. *count = min(blk_offset, size) / SZ_2M;
  733. return SZ_2M;
  734. }
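To see how get_pgsize() carves a range into 4K and 2M chunks (the same walk panthor_vm_unmap_pages() performs), here is a self-contained copy of the logic driven with a hypothetical range; min()/min_not_zero() are open-coded.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000ULL
#define SZ_2M 0x200000ULL
#define SZ_1G 0x40000000ULL

/* Userspace copy of get_pgsize() above. */
static uint64_t pgsize_for(uint64_t addr, uint64_t size, uint64_t *count)
{
        uint64_t blk_offset = -addr % SZ_2M;

        if (blk_offset || size < SZ_2M) {
                uint64_t lim = blk_offset && blk_offset < size ? blk_offset : size;

                *count = lim / SZ_4K;
                return SZ_4K;
        }

        blk_offset = -addr % SZ_1G;
        if (!blk_offset)
                blk_offset = SZ_1G;
        *count = (blk_offset < size ? blk_offset : size) / SZ_2M;
        return SZ_2M;
}

int main(void)
{
        /* Hypothetical range: 2M-misaligned start, roughly 6M long. */
        uint64_t iova = 0x10003000ULL, size = 0x601000ULL, offset = 0;

        while (offset < size) {
                uint64_t count;
                uint64_t pgsize = pgsize_for(iova + offset, size - offset, &count);

                printf("%#llx: %llu x %#llx\n",
                       (unsigned long long)(iova + offset),
                       (unsigned long long)count,
                       (unsigned long long)pgsize);
                offset += pgsize * count;
        }
        return 0;
}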
  735. static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
  736. {
  737. struct panthor_device *ptdev = vm->ptdev;
  738. int ret = 0, cookie;
  739. if (vm->as.id < 0)
  740. return 0;
  741. /* If the device is unplugged, we just silently skip the flush. */
  742. if (!drm_dev_enter(&ptdev->base, &cookie))
  743. return 0;
  744. ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
  745. drm_dev_exit(cookie);
  746. return ret;
  747. }
  748. /**
  749. * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
  750. * @vm: VM whose cache to flush
  751. *
  752. * Return: 0 on success, a negative error code if flush failed.
  753. */
  754. int panthor_vm_flush_all(struct panthor_vm *vm)
  755. {
  756. return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
  757. }
  758. static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
  759. {
  760. struct panthor_device *ptdev = vm->ptdev;
  761. struct io_pgtable_ops *ops = vm->pgtbl_ops;
  762. u64 offset = 0;
  763. drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
  764. while (offset < size) {
  765. size_t unmapped_sz = 0, pgcount;
  766. size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
  767. unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
  768. if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
  769. drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
  770. iova + offset + unmapped_sz,
  771. iova + offset + pgsize * pgcount,
  772. iova, iova + size);
  773. panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
  774. return -EINVAL;
  775. }
  776. offset += unmapped_sz;
  777. }
  778. return panthor_vm_flush_range(vm, iova, size);
  779. }
  780. static int
  781. panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
  782. struct sg_table *sgt, u64 offset, u64 size)
  783. {
  784. struct panthor_device *ptdev = vm->ptdev;
  785. unsigned int count;
  786. struct scatterlist *sgl;
  787. struct io_pgtable_ops *ops = vm->pgtbl_ops;
  788. u64 start_iova = iova;
  789. int ret;
  790. if (!size)
  791. return 0;
  792. for_each_sgtable_dma_sg(sgt, sgl, count) {
  793. dma_addr_t paddr = sg_dma_address(sgl);
  794. size_t len = sg_dma_len(sgl);
  795. if (len <= offset) {
  796. offset -= len;
  797. continue;
  798. }
  799. paddr += offset;
  800. len -= offset;
  801. len = min_t(size_t, len, size);
  802. size -= len;
  803. drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
  804. vm->as.id, iova, &paddr, len);
  805. while (len) {
  806. size_t pgcount, mapped = 0;
  807. size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
  808. ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
  809. GFP_KERNEL, &mapped);
  810. iova += mapped;
  811. paddr += mapped;
  812. len -= mapped;
  813. if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
  814. ret = -ENOMEM;
  815. if (ret) {
  816. /* If something failed, unmap what we've already mapped before
  817. * returning. The unmap call is not supposed to fail.
  818. */
  819. drm_WARN_ON(&ptdev->base,
  820. panthor_vm_unmap_pages(vm, start_iova,
  821. iova - start_iova));
  822. return ret;
  823. }
  824. }
  825. if (!size)
  826. break;
  827. offset = 0;
  828. }
  829. return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
  830. }
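The offset-skipping and size-clamping in the sg walk above can be hard to follow at a glance. This standalone sketch replays the same bookkeeping on an invented two-segment list (struct seg, the addresses and lengths are all hypothetical).

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t paddr; uint64_t len; };

int main(void)
{
        struct seg segs[] = { { 0x80000000, 0x4000 }, { 0x90000000, 0x8000 } };
        uint64_t offset = 0x5000, size = 0x6000, iova = 0x1000000;

        for (unsigned int i = 0; i < 2 && size; i++) {
                uint64_t paddr = segs[i].paddr, len = segs[i].len;

                if (len <= offset) {            /* segment fully consumed by the offset */
                        offset -= len;
                        continue;
                }
                paddr += offset;                /* partial skip into this segment */
                len -= offset;
                if (len > size)                 /* clamp to what is left to map */
                        len = size;
                size -= len;
                printf("map iova=%#llx <- paddr=%#llx len=%#llx\n",
                       (unsigned long long)iova, (unsigned long long)paddr,
                       (unsigned long long)len);
                iova += len;
                offset = 0;                     /* only the first mapped segment is offset */
        }
        return 0;
}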
  831. static int flags_to_prot(u32 flags)
  832. {
  833. int prot = 0;
  834. if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
  835. prot |= IOMMU_NOEXEC;
  836. if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
  837. prot |= IOMMU_CACHE;
  838. if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
  839. prot |= IOMMU_READ;
  840. else
  841. prot |= IOMMU_READ | IOMMU_WRITE;
  842. return prot;
  843. }
  844. /**
  845. * panthor_vm_alloc_va() - Allocate a region in the auto-va space
  846. * @vm: VM to allocate a region on.
  847. * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
  848. * wants the VA to be automatically allocated from the auto-VA range.
  849. * @size: size of the VA range.
  850. * @va_node: drm_mm_node to initialize. Must be zero-initialized.
  851. *
  852. * Some GPU objects, like heap chunks, are fully managed by the kernel and
  853. * need to be mapped to the userspace VM, in the region reserved for kernel
  854. * objects.
  855. *
  856. * This function takes care of allocating a region in the kernel auto-VA space.
  857. *
  858. * Return: 0 on success, an error code otherwise.
  859. */
  860. int
  861. panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
  862. struct drm_mm_node *va_node)
  863. {
  864. ssize_t vm_pgsz = panthor_vm_page_size(vm);
  865. int ret;
  866. if (!size || !IS_ALIGNED(size, vm_pgsz))
  867. return -EINVAL;
  868. if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
  869. return -EINVAL;
  870. mutex_lock(&vm->mm_lock);
  871. if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
  872. va_node->start = va;
  873. va_node->size = size;
  874. ret = drm_mm_reserve_node(&vm->mm, va_node);
  875. } else {
  876. ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
  877. size >= SZ_2M ? SZ_2M : SZ_4K,
  878. 0, vm->kernel_auto_va.start,
  879. vm->kernel_auto_va.end,
  880. DRM_MM_INSERT_BEST);
  881. }
  882. mutex_unlock(&vm->mm_lock);
  883. return ret;
  884. }
  885. /**
  886. * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
  887. * @vm: VM to free the region on.
  888. * @va_node: Memory node representing the region to free.
  889. */
  890. void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
  891. {
  892. mutex_lock(&vm->mm_lock);
  893. drm_mm_remove_node(va_node);
  894. mutex_unlock(&vm->mm_lock);
  895. }
  896. static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
  897. {
  898. struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
  899. struct drm_gpuvm *vm = vm_bo->vm;
  900. bool unpin;
  901. /* We must retain the GEM before calling drm_gpuvm_bo_put(),
  902. * otherwise the mutex might be destroyed while we hold it.
  903. * Same goes for the VM, since we take the VM resv lock.
  904. */
  905. drm_gem_object_get(&bo->base.base);
  906. drm_gpuvm_get(vm);
  907. /* We take the resv lock to protect against concurrent accesses to the
  908. * gpuvm evicted/extobj lists that are modified in
  909. * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
  910. * releases the last vm_bo reference.
  911. * We take the BO GPUVA list lock to protect the vm_bo removal from the
  912. * GEM vm_bo list.
  913. */
  914. dma_resv_lock(drm_gpuvm_resv(vm), NULL);
  915. mutex_lock(&bo->gpuva_list_lock);
  916. unpin = drm_gpuvm_bo_put(vm_bo);
  917. mutex_unlock(&bo->gpuva_list_lock);
  918. dma_resv_unlock(drm_gpuvm_resv(vm));
  919. /* If the vm_bo object was destroyed, release the pin reference that
  920. * was held by this object.
  921. */
  922. if (unpin && !bo->base.base.import_attach)
  923. drm_gem_shmem_unpin(&bo->base);
  924. drm_gpuvm_put(vm);
  925. drm_gem_object_put(&bo->base.base);
  926. }
  927. static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
  928. struct panthor_vm *vm)
  929. {
  930. struct panthor_vma *vma, *tmp_vma;
  931. u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
  932. op_ctx->rsvd_page_tables.ptr;
  933. if (remaining_pt_count) {
  934. kmem_cache_free_bulk(pt_cache, remaining_pt_count,
  935. op_ctx->rsvd_page_tables.pages +
  936. op_ctx->rsvd_page_tables.ptr);
  937. }
  938. kfree(op_ctx->rsvd_page_tables.pages);
  939. if (op_ctx->map.vm_bo)
  940. panthor_vm_bo_put(op_ctx->map.vm_bo);
  941. for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
  942. kfree(op_ctx->preallocated_vmas[i]);
  943. list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
  944. list_del(&vma->node);
  945. panthor_vm_bo_put(vma->base.vm_bo);
  946. kfree(vma);
  947. }
  948. }
  949. static struct panthor_vma *
  950. panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
  951. {
  952. for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
  953. struct panthor_vma *vma = op_ctx->preallocated_vmas[i];
  954. if (vma) {
  955. op_ctx->preallocated_vmas[i] = NULL;
  956. return vma;
  957. }
  958. }
  959. return NULL;
  960. }
  961. static int
  962. panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
  963. {
  964. u32 vma_count;
  965. switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
  966. case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
  967. /* One VMA for the new mapping, and two more VMAs for the remap case
  968. * which might contain both a prev and next VA.
  969. */
  970. vma_count = 3;
  971. break;
  972. case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
  973. /* Two VMAs can be needed for an unmap, as an unmap can happen
  974. * in the middle of a drm_gpuva, requiring a remap with both
  975. * prev & next VA. Or an unmap can span more than one drm_gpuva
  976. * where the first and last ones are covered partially, requiring
  977. * a remap for the first with a prev VA and remap for the last
  978. * with a next VA.
  979. */
  980. vma_count = 2;
  981. break;
  982. default:
  983. return 0;
  984. }
  985. for (u32 i = 0; i < vma_count; i++) {
  986. struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
  987. if (!vma)
  988. return -ENOMEM;
  989. op_ctx->preallocated_vmas[i] = vma;
  990. }
  991. return 0;
  992. }
  993. #define PANTHOR_VM_BIND_OP_MAP_FLAGS \
  994. (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
  995. DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
  996. DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
  997. DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
  998. static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
  999. struct panthor_vm *vm,
  1000. struct panthor_gem_object *bo,
  1001. u64 offset,
  1002. u64 size, u64 va,
  1003. u32 flags)
  1004. {
  1005. struct drm_gpuvm_bo *preallocated_vm_bo;
  1006. struct sg_table *sgt = NULL;
  1007. u64 pt_count;
  1008. int ret;
  1009. if (!bo)
  1010. return -EINVAL;
  1011. if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
  1012. (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
  1013. return -EINVAL;
  1014. /* Make sure the VA and size are aligned and in-bounds. */
  1015. if (size > bo->base.base.size || offset > bo->base.base.size - size)
  1016. return -EINVAL;
  1017. /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
  1018. if (bo->exclusive_vm_root_gem &&
  1019. bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
  1020. return -EINVAL;
  1021. memset(op_ctx, 0, sizeof(*op_ctx));
  1022. INIT_LIST_HEAD(&op_ctx->returned_vmas);
  1023. op_ctx->flags = flags;
  1024. op_ctx->va.range = size;
  1025. op_ctx->va.addr = va;
  1026. ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
  1027. if (ret)
  1028. goto err_cleanup;
  1029. if (!bo->base.base.import_attach) {
  1030. /* Pre-reserve the BO pages, so the map operation doesn't have to
  1031. * allocate.
  1032. */
  1033. ret = drm_gem_shmem_pin(&bo->base);
  1034. if (ret)
  1035. goto err_cleanup;
  1036. }
  1037. sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
  1038. if (IS_ERR(sgt)) {
  1039. if (!bo->base.base.import_attach)
  1040. drm_gem_shmem_unpin(&bo->base);
  1041. ret = PTR_ERR(sgt);
  1042. goto err_cleanup;
  1043. }
  1044. op_ctx->map.sgt = sgt;
  1045. preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
  1046. if (!preallocated_vm_bo) {
  1047. if (!bo->base.base.import_attach)
  1048. drm_gem_shmem_unpin(&bo->base);
  1049. ret = -ENOMEM;
  1050. goto err_cleanup;
  1051. }
  1052. /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
  1053. * pre-allocated BO if the <BO,VM> association exists. Given we
  1054. * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
  1055. * be called immediately, and we have to hold the VM resv lock when
  1056. * calling this function.
  1057. */
  1058. dma_resv_lock(panthor_vm_resv(vm), NULL);
  1059. mutex_lock(&bo->gpuva_list_lock);
  1060. op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
  1061. mutex_unlock(&bo->gpuva_list_lock);
  1062. dma_resv_unlock(panthor_vm_resv(vm));
  1063. /* If a vm_bo for this <VM,BO> combination exists, it already
  1064. * retains a pin ref, and we can release the one we took earlier.
  1065. *
  1066. * If our pre-allocated vm_bo is picked, it now retains the pin ref,
  1067. * which will be released in panthor_vm_bo_put().
  1068. */
  1069. if (preallocated_vm_bo != op_ctx->map.vm_bo &&
  1070. !bo->base.base.import_attach)
  1071. drm_gem_shmem_unpin(&bo->base);
  1072. op_ctx->map.bo_offset = offset;
  1073. /* L1, L2 and L3 page tables.
  1074. * We could optimize L3 allocation by iterating over the sgt and merging
  1075. * 2M contiguous blocks, but it's simpler to over-provision and return
  1076. * the pages if they're not used.
  1077. */
  1078. pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
  1079. ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
  1080. ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);
  1081. op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
  1082. sizeof(*op_ctx->rsvd_page_tables.pages),
  1083. GFP_KERNEL);
  1084. if (!op_ctx->rsvd_page_tables.pages) {
  1085. ret = -ENOMEM;
  1086. goto err_cleanup;
  1087. }
  1088. ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
  1089. op_ctx->rsvd_page_tables.pages);
  1090. op_ctx->rsvd_page_tables.count = ret;
  1091. if (ret != pt_count) {
  1092. ret = -ENOMEM;
  1093. goto err_cleanup;
  1094. }
  1095. /* Insert BO into the extobj list last, when we know nothing can fail. */
  1096. dma_resv_lock(panthor_vm_resv(vm), NULL);
  1097. drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
  1098. dma_resv_unlock(panthor_vm_resv(vm));
  1099. return 0;
  1100. err_cleanup:
  1101. panthor_vm_cleanup_op_ctx(op_ctx, vm);
  1102. return ret;
  1103. }
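The pt_count expression above deliberately over-counts: one page table per 512G, 1G and 2M block touched by the VA range (L1, L2 and L3 levels). A standalone restatement of the arithmetic, evaluated on a hypothetical range:

#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Worst-case number of L1 + L2 + L3 tables covering [va, va + size). */
static uint64_t pt_count(uint64_t va, uint64_t size)
{
        return ((align_up(va + size, 1ULL << 39) - align_down(va, 1ULL << 39)) >> 39) +
               ((align_up(va + size, 1ULL << 30) - align_down(va, 1ULL << 30)) >> 30) +
               ((align_up(va + size, 1ULL << 21) - align_down(va, 1ULL << 21)) >> 21);
}

int main(void)
{
        /* Hypothetical 6M mapping that crosses several 2M boundaries. */
        printf("%llu page tables reserved\n",
               (unsigned long long)pt_count(0x10003000ULL, 0x600000ULL));
        return 0;
}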
  1104. static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
  1105. struct panthor_vm *vm,
  1106. u64 va, u64 size)
  1107. {
  1108. u32 pt_count = 0;
  1109. int ret;
  1110. memset(op_ctx, 0, sizeof(*op_ctx));
  1111. INIT_LIST_HEAD(&op_ctx->returned_vmas);
  1112. op_ctx->va.range = size;
  1113. op_ctx->va.addr = va;
  1114. op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
  1115. /* Pre-allocate L3 page tables to account for the split-2M-block
  1116. * situation on unmap.
  1117. */
  1118. if (va != ALIGN(va, SZ_2M))
  1119. pt_count++;
  1120. if (va + size != ALIGN(va + size, SZ_2M) &&
  1121. ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
  1122. pt_count++;
  1123. ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
  1124. if (ret)
  1125. goto err_cleanup;
  1126. if (pt_count) {
  1127. op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
  1128. sizeof(*op_ctx->rsvd_page_tables.pages),
  1129. GFP_KERNEL);
  1130. if (!op_ctx->rsvd_page_tables.pages) {
  1131. ret = -ENOMEM;
  1132. goto err_cleanup;
  1133. }
  1134. ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
  1135. op_ctx->rsvd_page_tables.pages);
  1136. if (ret != pt_count) {
  1137. ret = -ENOMEM;
  1138. goto err_cleanup;
  1139. }
  1140. op_ctx->rsvd_page_tables.count = pt_count;
  1141. }
  1142. return 0;
  1143. err_cleanup:
  1144. panthor_vm_cleanup_op_ctx(op_ctx, vm);
  1145. return ret;
  1146. }
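The unmap path only needs spare L3 tables when a 2M block mapping has to be split at one or both ends of the range. The same check, standalone, on a hypothetical range:

#include <stdint.h>
#include <stdio.h>

#define SZ_2M 0x200000ULL

static uint64_t align_up(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

/* Mirrors the pre-allocation above: one L3 table per partially covered 2M block. */
static unsigned int unmap_pt_count(uint64_t va, uint64_t size)
{
        unsigned int count = 0;

        if (va != align_up(va, SZ_2M))
                count++;
        if (va + size != align_up(va + size, SZ_2M) &&
            align_up(va + size, SZ_2M) != align_up(va, SZ_2M))
                count++;
        return count;
}

int main(void)
{
        /* Both ends misaligned, in different 2M blocks: two tables reserved. */
        printf("%u\n", unmap_pt_count(0x10003000ULL, 0x400000ULL));
        return 0;
}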
  1147. static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
  1148. struct panthor_vm *vm)
  1149. {
  1150. memset(op_ctx, 0, sizeof(*op_ctx));
  1151. INIT_LIST_HEAD(&op_ctx->returned_vmas);
  1152. op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
  1153. }
  1154. /**
  1155. * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
  1156. * @vm: VM to look into.
  1157. * @va: Virtual address to search for.
  1158. * @bo_offset: Offset of the GEM object mapped at this virtual address.
  1159. * Only valid on success.
  1160. *
  1161. * The object returned by this function might no longer be mapped when the
  1162. * function returns. It's the caller's responsibility to ensure there's no
  1163. * concurrent map/unmap operations making the returned value invalid, or
  1164. * make sure it doesn't matter if the object is no longer mapped.
  1165. *
  1166. * Return: A valid pointer on success, an ERR_PTR() otherwise.
  1167. */
  1168. struct panthor_gem_object *
  1169. panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
  1170. {
  1171. struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
  1172. struct drm_gpuva *gpuva;
  1173. struct panthor_vma *vma;
  1174. /* Take the VM lock to prevent concurrent map/unmap operations. */
  1175. mutex_lock(&vm->op_lock);
  1176. gpuva = drm_gpuva_find_first(&vm->base, va, 1);
  1177. vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
  1178. if (vma && vma->base.gem.obj) {
  1179. drm_gem_object_get(vma->base.gem.obj);
  1180. bo = to_panthor_bo(vma->base.gem.obj);
  1181. *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
  1182. }
  1183. mutex_unlock(&vm->op_lock);
  1184. return bo;
  1185. }
  1186. #define PANTHOR_VM_MIN_KERNEL_VA_SIZE SZ_256M
  1187. static u64
  1188. panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
  1189. u64 full_va_range)
  1190. {
  1191. u64 user_va_range;
  1192. /* Make sure we have a minimum amount of VA space for kernel objects. */
  1193. if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
  1194. return 0;
  1195. if (args->user_va_range) {
  1196. /* Use the user provided value if != 0. */
  1197. user_va_range = args->user_va_range;
  1198. } else if (TASK_SIZE_OF(current) < full_va_range) {
  1199. /* If the task VM size is smaller than the GPU VA range, pick this
  1200. * as our default user VA range, so userspace can CPU/GPU map buffers
  1201. * at the same address.
  1202. */
  1203. user_va_range = TASK_SIZE_OF(current);
  1204. } else {
  1205. /* If the GPU VA range is smaller than the task VM size, we
  1206. * just have to live with the fact we won't be able to map
  1207. * all buffers at the same GPU/CPU address.
  1208. *
  1209. * If the GPU VA range is bigger than 4G (more than 32-bit of
  1210. * VA), we split the range in two, and assign half of it to
  1211. * the user and the other half to the kernel; if it's not, we
  1212. * keep the kernel VA space as small as possible.
  1213. */
  1214. user_va_range = full_va_range > SZ_4G ?
  1215. full_va_range / 2 :
  1216. full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
  1217. }
  1218. if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
  1219. user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
  1220. return user_va_range;
  1221. }
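/*
 * Worked example (assuming a 48-bit GPU VA space, so full_va_range = 256 TiB,
 * and no user-provided user_va_range): if the task VM size is smaller than
 * 256 TiB (e.g. a 47-bit user address space), the user range defaults to the
 * task size so buffers can be mapped at the same CPU/GPU address; otherwise
 * the range is simply split in half, since 256 TiB > 4G. In both cases the
 * final clamp keeps at least PANTHOR_VM_MIN_KERNEL_VA_SIZE (256 MB) for
 * kernel objects.
 */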
  1222. #define PANTHOR_VM_CREATE_FLAGS 0
  1223. static int
  1224. panthor_vm_create_check_args(const struct panthor_device *ptdev,
  1225. const struct drm_panthor_vm_create *args,
  1226. u64 *kernel_va_start, u64 *kernel_va_range)
  1227. {
  1228. u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
  1229. u64 full_va_range = 1ull << va_bits;
  1230. u64 user_va_range;
  1231. if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
  1232. return -EINVAL;
  1233. user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
  1234. if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
  1235. return -EINVAL;
  1236. /* Pick a kernel VA range that's a power of two, to have a clear split. */
  1237. *kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
  1238. *kernel_va_start = full_va_range - *kernel_va_range;
  1239. return 0;
  1240. }
  1241. /*
  1242. * Only 32 VMs per open file. If that becomes a limiting factor, we can
  1243. * increase this number.
  1244. */
  1245. #define PANTHOR_MAX_VMS_PER_FILE 32
  1246. /**
  1247. * panthor_vm_pool_create_vm() - Create a VM
1248. * @ptdev: The panthor device.
1249. * @pool: The VM pool to create the VM in.
1250. * @args: VM creation arguments.
  1251. *
  1252. * Return: a positive VM ID on success, a negative error code otherwise.
  1253. */
  1254. int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
  1255. struct panthor_vm_pool *pool,
  1256. struct drm_panthor_vm_create *args)
  1257. {
  1258. u64 kernel_va_start, kernel_va_range;
  1259. struct panthor_vm *vm;
  1260. int ret;
  1261. u32 id;
  1262. ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
  1263. if (ret)
  1264. return ret;
  1265. vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
  1266. kernel_va_start, kernel_va_range);
  1267. if (IS_ERR(vm))
  1268. return PTR_ERR(vm);
  1269. ret = xa_alloc(&pool->xa, &id, vm,
  1270. XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);
  1271. if (ret) {
  1272. panthor_vm_put(vm);
  1273. return ret;
  1274. }
  1275. args->user_va_range = kernel_va_start;
  1276. return id;
  1277. }
  1278. static void panthor_vm_destroy(struct panthor_vm *vm)
  1279. {
  1280. if (!vm)
  1281. return;
  1282. vm->destroyed = true;
  1283. mutex_lock(&vm->heaps.lock);
  1284. panthor_heap_pool_destroy(vm->heaps.pool);
  1285. vm->heaps.pool = NULL;
  1286. mutex_unlock(&vm->heaps.lock);
  1287. drm_WARN_ON(&vm->ptdev->base,
  1288. panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
  1289. panthor_vm_put(vm);
  1290. }
  1291. /**
  1292. * panthor_vm_pool_destroy_vm() - Destroy a VM.
  1293. * @pool: VM pool.
  1294. * @handle: VM handle.
  1295. *
  1296. * This function doesn't free the VM object or its resources, it just kills
  1297. * all mappings, and makes sure nothing can be mapped after that point.
  1298. *
1299. * If there were any active jobs at the time this function was called, these
1300. * jobs should experience page faults and be killed as a result.
  1301. *
  1302. * The VM resources are freed when the last reference on the VM object is
  1303. * dropped.
  1304. */
  1305. int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
  1306. {
  1307. struct panthor_vm *vm;
  1308. vm = xa_erase(&pool->xa, handle);
  1309. panthor_vm_destroy(vm);
  1310. return vm ? 0 : -EINVAL;
  1311. }
  1312. /**
  1313. * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
  1314. * @pool: VM pool to check.
  1315. * @handle: Handle of the VM to retrieve.
  1316. *
  1317. * Return: A valid pointer if the VM exists, NULL otherwise.
  1318. */
  1319. struct panthor_vm *
  1320. panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
  1321. {
  1322. struct panthor_vm *vm;
  1323. xa_lock(&pool->xa);
  1324. vm = panthor_vm_get(xa_load(&pool->xa, handle));
  1325. xa_unlock(&pool->xa);
  1326. return vm;
  1327. }
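/*
 * Usage sketch (illustrative only): turning a VM handle passed through an
 * ioctl into a VM reference. The vm_handle variable is hypothetical; the
 * reference returned by panthor_vm_pool_get_vm() must be released with
 * panthor_vm_put() once the caller is done with the VM.
 *
 *	struct panthor_vm *vm;
 *
 *	vm = panthor_vm_pool_get_vm(pfile->vms, vm_handle);
 *	if (!vm)
 *		return -EINVAL;
 *	... use the VM ...
 *	panthor_vm_put(vm);
 */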
  1328. /**
  1329. * panthor_vm_pool_destroy() - Destroy a VM pool.
  1330. * @pfile: File.
  1331. *
  1332. * Destroy all VMs in the pool, and release the pool resources.
  1333. *
  1334. * Note that VMs can outlive the pool they were created from if other
1335. * objects hold a reference to these VMs.
  1336. */
  1337. void panthor_vm_pool_destroy(struct panthor_file *pfile)
  1338. {
  1339. struct panthor_vm *vm;
  1340. unsigned long i;
  1341. if (!pfile->vms)
  1342. return;
  1343. xa_for_each(&pfile->vms->xa, i, vm)
  1344. panthor_vm_destroy(vm);
  1345. xa_destroy(&pfile->vms->xa);
  1346. kfree(pfile->vms);
  1347. }
  1348. /**
  1349. * panthor_vm_pool_create() - Create a VM pool
  1350. * @pfile: File.
  1351. *
  1352. * Return: 0 on success, a negative error code otherwise.
  1353. */
  1354. int panthor_vm_pool_create(struct panthor_file *pfile)
  1355. {
  1356. pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
  1357. if (!pfile->vms)
  1358. return -ENOMEM;
  1359. xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
  1360. return 0;
  1361. }
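/*
 * Lifecycle sketch (illustrative only): the pool is expected to be created
 * when the DRM file is opened and torn down when it is closed. The
 * panthor_open()/panthor_postclose() call sites below are assumptions, not
 * definitions from this file.
 *
 *	panthor_open():       ret = panthor_vm_pool_create(pfile);
 *	panthor_postclose():  panthor_vm_pool_destroy(pfile);
 */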
  1362. /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */
  1363. static void mmu_tlb_flush_all(void *cookie)
  1364. {
  1365. }
  1366. static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie)
  1367. {
  1368. }
  1369. static const struct iommu_flush_ops mmu_tlb_ops = {
  1370. .tlb_flush_all = mmu_tlb_flush_all,
  1371. .tlb_flush_walk = mmu_tlb_flush_walk,
  1372. };
  1373. static const char *access_type_name(struct panthor_device *ptdev,
  1374. u32 fault_status)
  1375. {
  1376. switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
  1377. case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
  1378. return "ATOMIC";
  1379. case AS_FAULTSTATUS_ACCESS_TYPE_READ:
  1380. return "READ";
  1381. case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
  1382. return "WRITE";
  1383. case AS_FAULTSTATUS_ACCESS_TYPE_EX:
  1384. return "EXECUTE";
  1385. default:
  1386. drm_WARN_ON(&ptdev->base, 1);
  1387. return NULL;
  1388. }
  1389. }
  1390. static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
  1391. {
  1392. bool has_unhandled_faults = false;
  1393. status = panthor_mmu_fault_mask(ptdev, status);
  1394. while (status) {
  1395. u32 as = ffs(status | (status >> 16)) - 1;
  1396. u32 mask = panthor_mmu_as_fault_mask(ptdev, as);
  1397. u32 new_int_mask;
  1398. u64 addr;
  1399. u32 fault_status;
  1400. u32 exception_type;
  1401. u32 access_type;
  1402. u32 source_id;
  1403. fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
  1404. addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
  1405. addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
  1406. /* decode the fault status */
  1407. exception_type = fault_status & 0xFF;
  1408. access_type = (fault_status >> 8) & 0x3;
  1409. source_id = (fault_status >> 16);
  1410. mutex_lock(&ptdev->mmu->as.slots_lock);
  1411. ptdev->mmu->as.faulty_mask |= mask;
  1412. new_int_mask =
  1413. panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask);
  1414. /* terminal fault, print info about the fault */
  1415. drm_err(&ptdev->base,
  1416. "Unhandled Page fault in AS%d at VA 0x%016llX\n"
  1417. "raw fault status: 0x%X\n"
  1418. "decoded fault status: %s\n"
  1419. "exception type 0x%X: %s\n"
  1420. "access type 0x%X: %s\n"
  1421. "source id 0x%X\n",
  1422. as, addr,
  1423. fault_status,
  1424. (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
  1425. exception_type, panthor_exception_name(ptdev, exception_type),
  1426. access_type, access_type_name(ptdev, fault_status),
  1427. source_id);
  1428. /* Ignore MMU interrupts on this AS until it's been
  1429. * re-enabled.
  1430. */
  1431. ptdev->mmu->irq.mask = new_int_mask;
  1432. gpu_write(ptdev, MMU_INT_MASK, new_int_mask);
  1433. if (ptdev->mmu->as.slots[as].vm)
  1434. ptdev->mmu->as.slots[as].vm->unhandled_fault = true;
  1435. /* Disable the MMU to kill jobs on this AS. */
  1436. panthor_mmu_as_disable(ptdev, as);
  1437. mutex_unlock(&ptdev->mmu->as.slots_lock);
  1438. status &= ~mask;
  1439. has_unhandled_faults = true;
  1440. }
  1441. if (has_unhandled_faults)
  1442. panthor_sched_report_mmu_fault(ptdev);
  1443. }
  1444. PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler);
  1445. /**
  1446. * panthor_mmu_suspend() - Suspend the MMU logic
  1447. * @ptdev: Device.
  1448. *
  1449. * All we do here is de-assign the AS slots on all active VMs, so things
1450. * get flushed to main memory, and no further access to these VMs is
1451. * possible.
  1452. *
  1453. * We also suspend the MMU IRQ.
  1454. */
  1455. void panthor_mmu_suspend(struct panthor_device *ptdev)
  1456. {
  1457. mutex_lock(&ptdev->mmu->as.slots_lock);
  1458. for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
  1459. struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
  1460. if (vm) {
  1461. drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
  1462. panthor_vm_release_as_locked(vm);
  1463. }
  1464. }
  1465. mutex_unlock(&ptdev->mmu->as.slots_lock);
  1466. panthor_mmu_irq_suspend(&ptdev->mmu->irq);
  1467. }
  1468. /**
  1469. * panthor_mmu_resume() - Resume the MMU logic
  1470. * @ptdev: Device.
  1471. *
  1472. * Resume the IRQ.
  1473. *
  1474. * We don't re-enable previously active VMs. We assume other parts of the
  1475. * driver will call panthor_vm_active() on the VMs they intend to use.
  1476. */
  1477. void panthor_mmu_resume(struct panthor_device *ptdev)
  1478. {
  1479. mutex_lock(&ptdev->mmu->as.slots_lock);
  1480. ptdev->mmu->as.alloc_mask = 0;
  1481. ptdev->mmu->as.faulty_mask = 0;
  1482. mutex_unlock(&ptdev->mmu->as.slots_lock);
  1483. panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
  1484. }
  1485. /**
  1486. * panthor_mmu_pre_reset() - Prepare for a reset
  1487. * @ptdev: Device.
  1488. *
  1489. * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
  1490. * don't get asked to do a VM operation while the GPU is down.
  1491. *
  1492. * We don't cleanly shutdown the AS slots here, because the reset might
  1493. * come from an AS_ACTIVE_BIT stuck situation.
  1494. */
  1495. void panthor_mmu_pre_reset(struct panthor_device *ptdev)
  1496. {
  1497. struct panthor_vm *vm;
  1498. panthor_mmu_irq_suspend(&ptdev->mmu->irq);
  1499. mutex_lock(&ptdev->mmu->vm.lock);
  1500. ptdev->mmu->vm.reset_in_progress = true;
  1501. list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
  1502. panthor_vm_stop(vm);
  1503. mutex_unlock(&ptdev->mmu->vm.lock);
  1504. }
  1505. /**
  1506. * panthor_mmu_post_reset() - Restore things after a reset
  1507. * @ptdev: Device.
  1508. *
  1509. * Put the MMU logic back in action after a reset. That implies resuming the
  1510. * IRQ and re-enabling the VM_BIND queues.
  1511. */
  1512. void panthor_mmu_post_reset(struct panthor_device *ptdev)
  1513. {
  1514. struct panthor_vm *vm;
  1515. mutex_lock(&ptdev->mmu->as.slots_lock);
  1516. /* Now that the reset is effective, we can assume that none of the
1517. * AS slots are set up, and clear the faulty flags too.
  1518. */
  1519. ptdev->mmu->as.alloc_mask = 0;
  1520. ptdev->mmu->as.faulty_mask = 0;
  1521. for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
  1522. struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
  1523. if (vm)
  1524. panthor_vm_release_as_locked(vm);
  1525. }
  1526. mutex_unlock(&ptdev->mmu->as.slots_lock);
  1527. panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
  1528. /* Restart the VM_BIND queues. */
  1529. mutex_lock(&ptdev->mmu->vm.lock);
  1530. list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
  1531. panthor_vm_start(vm);
  1532. }
  1533. ptdev->mmu->vm.reset_in_progress = false;
  1534. mutex_unlock(&ptdev->mmu->vm.lock);
  1535. }
  1536. static void panthor_vm_free(struct drm_gpuvm *gpuvm)
  1537. {
  1538. struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
  1539. struct panthor_device *ptdev = vm->ptdev;
  1540. mutex_lock(&vm->heaps.lock);
  1541. if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
  1542. panthor_heap_pool_destroy(vm->heaps.pool);
  1543. mutex_unlock(&vm->heaps.lock);
  1544. mutex_destroy(&vm->heaps.lock);
  1545. mutex_lock(&ptdev->mmu->vm.lock);
  1546. list_del(&vm->node);
  1547. /* Restore the scheduler state so we can call drm_sched_entity_destroy()
1548. * and drm_sched_fini(). If we get here, that means we have no job left
  1549. * and no new jobs can be queued, so we can start the scheduler without
  1550. * risking interfering with the reset.
  1551. */
  1552. if (ptdev->mmu->vm.reset_in_progress)
  1553. panthor_vm_start(vm);
  1554. mutex_unlock(&ptdev->mmu->vm.lock);
  1555. drm_sched_entity_destroy(&vm->entity);
  1556. drm_sched_fini(&vm->sched);
  1557. mutex_lock(&ptdev->mmu->as.slots_lock);
  1558. if (vm->as.id >= 0) {
  1559. int cookie;
  1560. if (drm_dev_enter(&ptdev->base, &cookie)) {
  1561. panthor_mmu_as_disable(ptdev, vm->as.id);
  1562. drm_dev_exit(cookie);
  1563. }
  1564. ptdev->mmu->as.slots[vm->as.id].vm = NULL;
  1565. clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
  1566. list_del(&vm->as.lru_node);
  1567. }
  1568. mutex_unlock(&ptdev->mmu->as.slots_lock);
  1569. free_io_pgtable_ops(vm->pgtbl_ops);
  1570. drm_mm_takedown(&vm->mm);
  1571. kfree(vm);
  1572. }
  1573. /**
  1574. * panthor_vm_put() - Release a reference on a VM
  1575. * @vm: VM to release the reference on. Can be NULL.
  1576. */
  1577. void panthor_vm_put(struct panthor_vm *vm)
  1578. {
  1579. drm_gpuvm_put(vm ? &vm->base : NULL);
  1580. }
  1581. /**
  1582. * panthor_vm_get() - Get a VM reference
  1583. * @vm: VM to get the reference on. Can be NULL.
  1584. *
  1585. * Return: @vm value.
  1586. */
  1587. struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
  1588. {
  1589. if (vm)
  1590. drm_gpuvm_get(&vm->base);
  1591. return vm;
  1592. }
  1593. /**
  1594. * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
  1595. * @vm: VM to query the heap pool on.
  1596. * @create: True if the heap pool should be created when it doesn't exist.
  1597. *
  1598. * Heap pools are per-VM. This function allows one to retrieve the heap pool
  1599. * attached to a VM.
  1600. *
  1601. * If no heap pool exists yet, and @create is true, we create one.
  1602. *
  1603. * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
  1604. *
  1605. * Return: A valid pointer on success, an ERR_PTR() otherwise.
  1606. */
  1607. struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
  1608. {
  1609. struct panthor_heap_pool *pool;
  1610. mutex_lock(&vm->heaps.lock);
  1611. if (!vm->heaps.pool && create) {
  1612. if (vm->destroyed)
  1613. pool = ERR_PTR(-EINVAL);
  1614. else
  1615. pool = panthor_heap_pool_create(vm->ptdev, vm);
  1616. if (!IS_ERR(pool))
  1617. vm->heaps.pool = panthor_heap_pool_get(pool);
  1618. } else {
  1619. pool = panthor_heap_pool_get(vm->heaps.pool);
  1620. if (!pool)
  1621. pool = ERR_PTR(-ENOENT);
  1622. }
  1623. mutex_unlock(&vm->heaps.lock);
  1624. return pool;
  1625. }
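/*
 * Usage sketch (illustrative only): lazily creating the heap pool when the
 * first tiler heap is requested, then dropping the local reference. Only
 * panthor_vm_get_heap_pool() and panthor_heap_pool_put() are existing
 * helpers; the surrounding logic is hypothetical.
 *
 *	struct panthor_heap_pool *pool;
 *
 *	pool = panthor_vm_get_heap_pool(vm, true);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	... create the heap inside the pool ...
 *	panthor_heap_pool_put(pool);
 */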
  1626. static u64 mair_to_memattr(u64 mair)
  1627. {
  1628. u64 memattr = 0;
  1629. u32 i;
  1630. for (i = 0; i < 8; i++) {
  1631. u8 in_attr = mair >> (8 * i), out_attr;
  1632. u8 outer = in_attr >> 4, inner = in_attr & 0xf;
1633. /* For caching to be enabled, the inner and outer caching policies
1634. * both have to be write-back. If one of them is write-through
1635. * or non-cacheable, we just choose non-cacheable. Device
1636. * memory is also translated to non-cacheable.
  1637. */
  1638. if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
  1639. out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
  1640. AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
  1641. AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
  1642. } else {
  1643. /* Use SH_CPU_INNER mode so SH_IS, which is used when
  1644. * IOMMU_CACHE is set, actually maps to the standard
  1645. * definition of inner-shareable and not Mali's
  1646. * internal-shareable mode.
  1647. */
  1648. out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
  1649. AS_MEMATTR_AARCH64_SH_CPU_INNER |
  1650. AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
  1651. }
  1652. memattr |= (u64)out_attr << (8 * i);
  1653. }
  1654. return memattr;
  1655. }
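/*
 * Worked example (sketch): a MAIR attribute of 0xff (inner/outer write-back,
 * read/write-allocate) gives outer = 0xf and inner = 0xf, so the cacheable
 * branch is taken and the MEMATTR byte encodes inner/outer write-back,
 * CPU-inner-shareable, with both allocation hints set. A device-memory
 * attribute such as 0x04 fails the (outer & 3) test and is translated to
 * inner/outer non-cacheable instead.
 */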
  1656. static void panthor_vma_link(struct panthor_vm *vm,
  1657. struct panthor_vma *vma,
  1658. struct drm_gpuvm_bo *vm_bo)
  1659. {
  1660. struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
  1661. mutex_lock(&bo->gpuva_list_lock);
  1662. drm_gpuva_link(&vma->base, vm_bo);
  1663. drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
  1664. mutex_unlock(&bo->gpuva_list_lock);
  1665. }
  1666. static void panthor_vma_unlink(struct panthor_vm *vm,
  1667. struct panthor_vma *vma)
  1668. {
  1669. struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
  1670. struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
  1671. mutex_lock(&bo->gpuva_list_lock);
  1672. drm_gpuva_unlink(&vma->base);
  1673. mutex_unlock(&bo->gpuva_list_lock);
1674. /* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
  1675. * when entering this function, so we can implement deferred VMA
  1676. * destruction. Re-assign it here.
  1677. */
  1678. vma->base.vm_bo = vm_bo;
  1679. list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
  1680. }
  1681. static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
  1682. {
  1683. INIT_LIST_HEAD(&vma->node);
  1684. vma->flags = flags;
  1685. }
  1686. #define PANTHOR_VM_MAP_FLAGS \
  1687. (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
  1688. DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
  1689. DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)
  1690. static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
  1691. {
  1692. struct panthor_vm *vm = priv;
  1693. struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
  1694. struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
  1695. int ret;
  1696. if (!vma)
  1697. return -EINVAL;
  1698. panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
  1699. ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
  1700. op_ctx->map.sgt, op->map.gem.offset,
  1701. op->map.va.range);
  1702. if (ret)
  1703. return ret;
  1704. /* Ref owned by the mapping now, clear the obj field so we don't release the
  1705. * pinning/obj ref behind GPUVA's back.
  1706. */
  1707. drm_gpuva_map(&vm->base, &vma->base, &op->map);
  1708. panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
  1709. op_ctx->map.vm_bo = NULL;
  1710. return 0;
  1711. }
  1712. static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
  1713. void *priv)
  1714. {
  1715. struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
  1716. struct panthor_vm *vm = priv;
  1717. struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
  1718. struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
  1719. u64 unmap_start, unmap_range;
  1720. int ret;
  1721. drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
  1722. ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
  1723. if (ret)
  1724. return ret;
  1725. if (op->remap.prev) {
  1726. prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
  1727. panthor_vma_init(prev_vma, unmap_vma->flags);
  1728. }
  1729. if (op->remap.next) {
  1730. next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
  1731. panthor_vma_init(next_vma, unmap_vma->flags);
  1732. }
  1733. drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
  1734. next_vma ? &next_vma->base : NULL,
  1735. &op->remap);
  1736. if (prev_vma) {
  1737. /* panthor_vma_link() transfers the vm_bo ownership to
  1738. * the VMA object. Since the vm_bo we're passing is still
  1739. * owned by the old mapping which will be released when this
  1740. * mapping is destroyed, we need to grab a ref here.
  1741. */
  1742. panthor_vma_link(vm, prev_vma,
  1743. drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
  1744. }
  1745. if (next_vma) {
  1746. panthor_vma_link(vm, next_vma,
  1747. drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
  1748. }
  1749. panthor_vma_unlink(vm, unmap_vma);
  1750. return 0;
  1751. }
  1752. static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
  1753. void *priv)
  1754. {
  1755. struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
  1756. struct panthor_vm *vm = priv;
  1757. int ret;
  1758. ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
  1759. unmap_vma->base.va.range);
  1760. if (drm_WARN_ON(&vm->ptdev->base, ret))
  1761. return ret;
  1762. drm_gpuva_unmap(&op->unmap);
  1763. panthor_vma_unlink(vm, unmap_vma);
  1764. return 0;
  1765. }
  1766. static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
  1767. .vm_free = panthor_vm_free,
  1768. .sm_step_map = panthor_gpuva_sm_step_map,
  1769. .sm_step_remap = panthor_gpuva_sm_step_remap,
  1770. .sm_step_unmap = panthor_gpuva_sm_step_unmap,
  1771. };
  1772. /**
  1773. * panthor_vm_resv() - Get the dma_resv object attached to a VM.
  1774. * @vm: VM to get the dma_resv of.
  1775. *
  1776. * Return: A dma_resv object.
  1777. */
  1778. struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
  1779. {
  1780. return drm_gpuvm_resv(&vm->base);
  1781. }
  1782. struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
  1783. {
  1784. if (!vm)
  1785. return NULL;
  1786. return vm->base.r_obj;
  1787. }
  1788. static int
  1789. panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
  1790. bool flag_vm_unusable_on_failure)
  1791. {
  1792. u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
  1793. int ret;
  1794. if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
  1795. return 0;
  1796. mutex_lock(&vm->op_lock);
  1797. vm->op_ctx = op;
  1798. switch (op_type) {
  1799. case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
  1800. if (vm->unusable) {
  1801. ret = -EINVAL;
  1802. break;
  1803. }
  1804. ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
  1805. op->map.vm_bo->obj, op->map.bo_offset);
  1806. break;
  1807. case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
  1808. ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
  1809. break;
  1810. default:
  1811. ret = -EINVAL;
  1812. break;
  1813. }
  1814. if (ret && flag_vm_unusable_on_failure)
  1815. vm->unusable = true;
  1816. vm->op_ctx = NULL;
  1817. mutex_unlock(&vm->op_lock);
  1818. return ret;
  1819. }
  1820. static struct dma_fence *
  1821. panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
  1822. {
  1823. struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
  1824. bool cookie;
  1825. int ret;
1826. /* Not only do we report an error, which is propagated to the
1827. * drm_sched finished fence, but we also flag the VM as unusable, because
1828. * a failure in the async VM_BIND results in an inconsistent state: the VM
1829. * needs to be destroyed and recreated.
  1830. */
  1831. cookie = dma_fence_begin_signalling();
  1832. ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
  1833. dma_fence_end_signalling(cookie);
  1834. return ret ? ERR_PTR(ret) : NULL;
  1835. }
  1836. static void panthor_vm_bind_job_release(struct kref *kref)
  1837. {
  1838. struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);
  1839. if (job->base.s_fence)
  1840. drm_sched_job_cleanup(&job->base);
  1841. panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
  1842. panthor_vm_put(job->vm);
  1843. kfree(job);
  1844. }
  1845. /**
  1846. * panthor_vm_bind_job_put() - Release a VM_BIND job reference
  1847. * @sched_job: Job to release the reference on.
  1848. */
  1849. void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
  1850. {
  1851. struct panthor_vm_bind_job *job =
  1852. container_of(sched_job, struct panthor_vm_bind_job, base);
  1853. if (sched_job)
  1854. kref_put(&job->refcount, panthor_vm_bind_job_release);
  1855. }
  1856. static void
  1857. panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
  1858. {
  1859. struct panthor_vm_bind_job *job =
  1860. container_of(sched_job, struct panthor_vm_bind_job, base);
  1861. drm_sched_job_cleanup(sched_job);
  1862. /* Do the heavy cleanups asynchronously, so we're out of the
  1863. * dma-signaling path and can acquire dma-resv locks safely.
  1864. */
  1865. queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
  1866. }
  1867. static enum drm_gpu_sched_stat
  1868. panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
  1869. {
  1870. WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
  1871. return DRM_GPU_SCHED_STAT_NOMINAL;
  1872. }
  1873. static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
  1874. .run_job = panthor_vm_bind_run_job,
  1875. .free_job = panthor_vm_bind_free_job,
  1876. .timedout_job = panthor_vm_bind_timedout_job,
  1877. };
  1878. /**
  1879. * panthor_vm_create() - Create a VM
  1880. * @ptdev: Device.
  1881. * @for_mcu: True if this is the FW MCU VM.
  1882. * @kernel_va_start: Start of the range reserved for kernel BO mapping.
  1883. * @kernel_va_size: Size of the range reserved for kernel BO mapping.
  1884. * @auto_kernel_va_start: Start of the auto-VA kernel range.
  1885. * @auto_kernel_va_size: Size of the auto-VA kernel range.
  1886. *
  1887. * Return: A valid pointer on success, an ERR_PTR() otherwise.
  1888. */
  1889. struct panthor_vm *
  1890. panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
  1891. u64 kernel_va_start, u64 kernel_va_size,
  1892. u64 auto_kernel_va_start, u64 auto_kernel_va_size)
  1893. {
  1894. u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
  1895. u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
  1896. u64 full_va_range = 1ull << va_bits;
  1897. struct drm_gem_object *dummy_gem;
  1898. struct drm_gpu_scheduler *sched;
  1899. struct io_pgtable_cfg pgtbl_cfg;
  1900. u64 mair, min_va, va_range;
  1901. struct panthor_vm *vm;
  1902. int ret;
  1903. vm = kzalloc(sizeof(*vm), GFP_KERNEL);
  1904. if (!vm)
  1905. return ERR_PTR(-ENOMEM);
  1906. /* We allocate a dummy GEM for the VM. */
  1907. dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
  1908. if (!dummy_gem) {
  1909. ret = -ENOMEM;
  1910. goto err_free_vm;
  1911. }
  1912. mutex_init(&vm->heaps.lock);
  1913. vm->for_mcu = for_mcu;
  1914. vm->ptdev = ptdev;
  1915. mutex_init(&vm->op_lock);
  1916. if (for_mcu) {
1917. /* The CSF MCU is a Cortex-M7 and can only address 4G */
  1918. min_va = 0;
  1919. va_range = SZ_4G;
  1920. } else {
  1921. min_va = 0;
  1922. va_range = full_va_range;
  1923. }
  1924. mutex_init(&vm->mm_lock);
  1925. drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
  1926. vm->kernel_auto_va.start = auto_kernel_va_start;
  1927. vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;
  1928. INIT_LIST_HEAD(&vm->node);
  1929. INIT_LIST_HEAD(&vm->as.lru_node);
  1930. vm->as.id = -1;
  1931. refcount_set(&vm->as.active_cnt, 0);
  1932. pgtbl_cfg = (struct io_pgtable_cfg) {
  1933. .pgsize_bitmap = SZ_4K | SZ_2M,
  1934. .ias = va_bits,
  1935. .oas = pa_bits,
  1936. .coherent_walk = ptdev->coherent,
  1937. .tlb = &mmu_tlb_ops,
  1938. .iommu_dev = ptdev->base.dev,
  1939. .alloc = alloc_pt,
  1940. .free = free_pt,
  1941. };
  1942. vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
  1943. if (!vm->pgtbl_ops) {
  1944. ret = -EINVAL;
  1945. goto err_mm_takedown;
  1946. }
  1947. /* Bind operations are synchronous for now, no timeout needed. */
  1948. ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq,
  1949. 1, 1, 0,
  1950. MAX_SCHEDULE_TIMEOUT, NULL, NULL,
  1951. "panthor-vm-bind", ptdev->base.dev);
  1952. if (ret)
  1953. goto err_free_io_pgtable;
  1954. sched = &vm->sched;
  1955. ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
  1956. if (ret)
  1957. goto err_sched_fini;
  1958. mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
  1959. vm->memattr = mair_to_memattr(mair);
  1960. mutex_lock(&ptdev->mmu->vm.lock);
  1961. list_add_tail(&vm->node, &ptdev->mmu->vm.list);
  1962. /* If a reset is in progress, stop the scheduler. */
  1963. if (ptdev->mmu->vm.reset_in_progress)
  1964. panthor_vm_stop(vm);
  1965. mutex_unlock(&ptdev->mmu->vm.lock);
1966. /* We intentionally leave the reserved range at zero, because we want kernel VMAs
  1967. * to be handled the same way user VMAs are.
  1968. */
  1969. drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
  1970. DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
  1971. min_va, va_range, 0, 0, &panthor_gpuvm_ops);
  1972. drm_gem_object_put(dummy_gem);
  1973. return vm;
  1974. err_sched_fini:
  1975. drm_sched_fini(&vm->sched);
  1976. err_free_io_pgtable:
  1977. free_io_pgtable_ops(vm->pgtbl_ops);
  1978. err_mm_takedown:
  1979. drm_mm_takedown(&vm->mm);
  1980. drm_gem_object_put(dummy_gem);
  1981. err_free_vm:
  1982. kfree(vm);
  1983. return ERR_PTR(ret);
  1984. }
  1985. static int
  1986. panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
  1987. struct panthor_vm *vm,
  1988. const struct drm_panthor_vm_bind_op *op,
  1989. struct panthor_vm_op_ctx *op_ctx)
  1990. {
  1991. ssize_t vm_pgsz = panthor_vm_page_size(vm);
  1992. struct drm_gem_object *gem;
  1993. int ret;
  1994. /* Aligned on page size. */
  1995. if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
  1996. return -EINVAL;
  1997. switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
  1998. case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
  1999. gem = drm_gem_object_lookup(file, op->bo_handle);
  2000. ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
  2001. gem ? to_panthor_bo(gem) : NULL,
  2002. op->bo_offset,
  2003. op->size,
  2004. op->va,
  2005. op->flags);
  2006. drm_gem_object_put(gem);
  2007. return ret;
  2008. case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
  2009. if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
  2010. return -EINVAL;
  2011. if (op->bo_handle || op->bo_offset)
  2012. return -EINVAL;
  2013. return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);
  2014. case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
  2015. if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
  2016. return -EINVAL;
  2017. if (op->bo_handle || op->bo_offset)
  2018. return -EINVAL;
  2019. if (op->va || op->size)
  2020. return -EINVAL;
  2021. if (!op->syncs.count)
  2022. return -EINVAL;
  2023. panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
  2024. return 0;
  2025. default:
  2026. return -EINVAL;
  2027. }
  2028. }
  2029. static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
  2030. {
  2031. struct panthor_vm_bind_job *job =
  2032. container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);
  2033. panthor_vm_bind_job_put(&job->base);
  2034. }
  2035. /**
  2036. * panthor_vm_bind_job_create() - Create a VM_BIND job
  2037. * @file: File.
  2038. * @vm: VM targeted by the VM_BIND job.
  2039. * @op: VM operation data.
  2040. *
  2041. * Return: A valid pointer on success, an ERR_PTR() otherwise.
  2042. */
  2043. struct drm_sched_job *
  2044. panthor_vm_bind_job_create(struct drm_file *file,
  2045. struct panthor_vm *vm,
  2046. const struct drm_panthor_vm_bind_op *op)
  2047. {
  2048. struct panthor_vm_bind_job *job;
  2049. int ret;
  2050. if (!vm)
  2051. return ERR_PTR(-EINVAL);
  2052. if (vm->destroyed || vm->unusable)
  2053. return ERR_PTR(-EINVAL);
  2054. job = kzalloc(sizeof(*job), GFP_KERNEL);
  2055. if (!job)
  2056. return ERR_PTR(-ENOMEM);
  2057. ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
  2058. if (ret) {
  2059. kfree(job);
  2060. return ERR_PTR(ret);
  2061. }
  2062. INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
  2063. kref_init(&job->refcount);
  2064. job->vm = panthor_vm_get(vm);
  2065. ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
  2066. if (ret)
  2067. goto err_put_job;
  2068. return &job->base;
  2069. err_put_job:
  2070. panthor_vm_bind_job_put(&job->base);
  2071. return ERR_PTR(ret);
  2072. }
  2073. /**
  2074. * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
  2075. * @exec: The locking/preparation context.
  2076. * @sched_job: The job to prepare resvs on.
  2077. *
2078. * Locks and prepares the VM resv.
  2079. *
  2080. * If this is a map operation, locks and prepares the GEM resv.
  2081. *
  2082. * Return: 0 on success, a negative error code otherwise.
  2083. */
  2084. int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
  2085. struct drm_sched_job *sched_job)
  2086. {
  2087. struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
  2088. int ret;
2089. /* Acquire the VM lock and reserve a slot for this VM bind job. */
  2090. ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
  2091. if (ret)
  2092. return ret;
  2093. if (job->ctx.map.vm_bo) {
  2094. /* Lock/prepare the GEM being mapped. */
  2095. ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
  2096. if (ret)
  2097. return ret;
  2098. }
  2099. return 0;
  2100. }
  2101. /**
  2102. * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
  2103. * @exec: drm_exec context.
  2104. * @sched_job: Job to update the resvs on.
  2105. */
  2106. void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
  2107. struct drm_sched_job *sched_job)
  2108. {
  2109. struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
  2110. /* Explicit sync => we just register our job finished fence as bookkeep. */
  2111. drm_gpuvm_resv_add_fence(&job->vm->base, exec,
  2112. &sched_job->s_fence->finished,
  2113. DMA_RESV_USAGE_BOOKKEEP,
  2114. DMA_RESV_USAGE_BOOKKEEP);
  2115. }
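/*
 * Submission sketch (illustrative only, error handling trimmed): how the
 * async VM_BIND path is expected to chain the helpers above. The drm_exec
 * flags and the arm/update/push ordering follow the usual drm_sched +
 * drm_gpuvm pattern and are assumptions, not code from this file.
 *
 *	struct drm_sched_job *job;
 *	struct drm_exec exec;
 *	int ret;
 *
 *	job = panthor_vm_bind_job_create(file, vm, op);
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_bind_job_prepare_resvs(&exec, job);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	drm_sched_job_arm(job);
 *	panthor_vm_bind_job_update_resvs(&exec, job);
 *	drm_sched_entity_push_job(job);
 *	drm_exec_fini(&exec);
 *	panthor_vm_bind_job_put(job);
 */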
  2116. void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
  2117. struct dma_fence *fence,
  2118. enum dma_resv_usage private_usage,
  2119. enum dma_resv_usage extobj_usage)
  2120. {
  2121. drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
  2122. }
  2123. /**
  2124. * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
  2125. * @file: File.
  2126. * @vm: VM targeted by the VM operation.
  2127. * @op: Data describing the VM operation.
  2128. *
  2129. * Return: 0 on success, a negative error code otherwise.
  2130. */
  2131. int panthor_vm_bind_exec_sync_op(struct drm_file *file,
  2132. struct panthor_vm *vm,
  2133. struct drm_panthor_vm_bind_op *op)
  2134. {
  2135. struct panthor_vm_op_ctx op_ctx;
  2136. int ret;
  2137. /* No sync objects allowed on synchronous operations. */
  2138. if (op->syncs.count)
  2139. return -EINVAL;
  2140. if (!op->size)
  2141. return 0;
  2142. ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
  2143. if (ret)
  2144. return ret;
  2145. ret = panthor_vm_exec_op(vm, &op_ctx, false);
  2146. panthor_vm_cleanup_op_ctx(&op_ctx, vm);
  2147. return ret;
  2148. }
  2149. /**
  2150. * panthor_vm_map_bo_range() - Map a GEM object range to a VM
  2151. * @vm: VM to map the GEM to.
  2152. * @bo: GEM object to map.
  2153. * @offset: Offset in the GEM object.
  2154. * @size: Size to map.
  2155. * @va: Virtual address to map the object to.
  2156. * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
  2157. * Only map-related flags are valid.
  2158. *
  2159. * Internal use only. For userspace requests, use
  2160. * panthor_vm_bind_exec_sync_op() instead.
  2161. *
  2162. * Return: 0 on success, a negative error code otherwise.
  2163. */
  2164. int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
  2165. u64 offset, u64 size, u64 va, u32 flags)
  2166. {
  2167. struct panthor_vm_op_ctx op_ctx;
  2168. int ret;
  2169. ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
  2170. if (ret)
  2171. return ret;
  2172. ret = panthor_vm_exec_op(vm, &op_ctx, false);
  2173. panthor_vm_cleanup_op_ctx(&op_ctx, vm);
  2174. return ret;
  2175. }
  2176. /**
  2177. * panthor_vm_unmap_range() - Unmap a portion of the VA space
  2178. * @vm: VM to unmap the region from.
  2179. * @va: Virtual address to unmap. Must be 4k aligned.
  2180. * @size: Size of the region to unmap. Must be 4k aligned.
  2181. *
  2182. * Internal use only. For userspace requests, use
  2183. * panthor_vm_bind_exec_sync_op() instead.
  2184. *
  2185. * Return: 0 on success, a negative error code otherwise.
  2186. */
  2187. int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
  2188. {
  2189. struct panthor_vm_op_ctx op_ctx;
  2190. int ret;
  2191. ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
  2192. if (ret)
  2193. return ret;
  2194. ret = panthor_vm_exec_op(vm, &op_ctx, false);
  2195. panthor_vm_cleanup_op_ctx(&op_ctx, vm);
  2196. return ret;
  2197. }
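/*
 * Usage sketch (illustrative only): mapping a kernel BO at a fixed GPU VA and
 * tearing the mapping down again. The kbo, size and map_va values are
 * hypothetical; flags must stay within the PANTHOR_VM_MAP_FLAGS subset.
 *
 *	ret = panthor_vm_map_bo_range(vm, kbo, 0, size, map_va,
 *				      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = panthor_vm_unmap_range(vm, map_va, size);
 */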
  2198. /**
  2199. * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
  2200. * @exec: Locking/preparation context.
  2201. * @vm: VM targeted by the GPU job.
  2202. * @slot_count: Number of slots to reserve.
  2203. *
  2204. * GPU jobs assume all BOs bound to the VM at the time the job is submitted
  2205. * are available when the job is executed. In order to guarantee that, we
  2206. * need to reserve a slot on all BOs mapped to a VM and update this slot with
  2207. * the job fence after its submission.
  2208. *
  2209. * Return: 0 on success, a negative error code otherwise.
  2210. */
  2211. int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
  2212. u32 slot_count)
  2213. {
  2214. int ret;
  2215. /* Acquire the VM lock and reserve a slot for this GPU job. */
  2216. ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
  2217. if (ret)
  2218. return ret;
  2219. return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
  2220. }
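/*
 * Usage sketch (illustrative only): reserving one fence slot on the VM resv
 * and on every BO mapped to the VM before submitting a GPU job. The drm_exec
 * flags and loop structure follow the standard drm_exec pattern and are
 * assumptions, not code from this file.
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			break;
 *	}
 *	... arm the job, add the job fence to the resvs, push the job ...
 *	drm_exec_fini(&exec);
 */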
  2221. /**
  2222. * panthor_mmu_unplug() - Unplug the MMU logic
  2223. * @ptdev: Device.
  2224. *
  2225. * No access to the MMU regs should be done after this function is called.
  2226. * We suspend the IRQ and disable all VMs to guarantee that.
  2227. */
  2228. void panthor_mmu_unplug(struct panthor_device *ptdev)
  2229. {
  2230. panthor_mmu_irq_suspend(&ptdev->mmu->irq);
  2231. mutex_lock(&ptdev->mmu->as.slots_lock);
  2232. for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
  2233. struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;
  2234. if (vm) {
  2235. drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
  2236. panthor_vm_release_as_locked(vm);
  2237. }
  2238. }
  2239. mutex_unlock(&ptdev->mmu->as.slots_lock);
  2240. }
  2241. static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
  2242. {
  2243. destroy_workqueue(res);
  2244. }
  2245. /**
  2246. * panthor_mmu_init() - Initialize the MMU logic.
  2247. * @ptdev: Device.
  2248. *
  2249. * Return: 0 on success, a negative error code otherwise.
  2250. */
  2251. int panthor_mmu_init(struct panthor_device *ptdev)
  2252. {
  2253. u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
  2254. struct panthor_mmu *mmu;
  2255. int ret, irq;
  2256. mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
  2257. if (!mmu)
  2258. return -ENOMEM;
  2259. INIT_LIST_HEAD(&mmu->as.lru_list);
  2260. ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
  2261. if (ret)
  2262. return ret;
  2263. INIT_LIST_HEAD(&mmu->vm.list);
  2264. ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
  2265. if (ret)
  2266. return ret;
  2267. ptdev->mmu = mmu;
  2268. irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
  2269. if (irq <= 0)
  2270. return -ENODEV;
  2271. ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
  2272. panthor_mmu_fault_mask(ptdev, ~0));
  2273. if (ret)
  2274. return ret;
  2275. mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
  2276. if (!mmu->vm.wq)
  2277. return -ENOMEM;
  2278. /* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction,
  2279. * which passes iova as an unsigned long. Patch the mmu_features to reflect this
  2280. * limitation.
  2281. */
  2282. if (sizeof(unsigned long) * 8 < va_bits) {
  2283. ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
  2284. ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8;
  2285. }
  2286. return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
  2287. }
  2288. #ifdef CONFIG_DEBUG_FS
  2289. static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
  2290. {
  2291. int ret;
  2292. mutex_lock(&vm->op_lock);
  2293. ret = drm_debugfs_gpuva_info(m, &vm->base);
  2294. mutex_unlock(&vm->op_lock);
  2295. return ret;
  2296. }
  2297. static int show_each_vm(struct seq_file *m, void *arg)
  2298. {
  2299. struct drm_info_node *node = (struct drm_info_node *)m->private;
  2300. struct drm_device *ddev = node->minor->dev;
  2301. struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
  2302. int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
  2303. struct panthor_vm *vm;
  2304. int ret = 0;
  2305. mutex_lock(&ptdev->mmu->vm.lock);
  2306. list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
  2307. ret = show(vm, m);
  2308. if (ret < 0)
  2309. break;
  2310. seq_puts(m, "\n");
  2311. }
  2312. mutex_unlock(&ptdev->mmu->vm.lock);
  2313. return ret;
  2314. }
  2315. static struct drm_info_list panthor_mmu_debugfs_list[] = {
  2316. DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
  2317. };
  2318. /**
  2319. * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
  2320. * @minor: Minor.
  2321. */
  2322. void panthor_mmu_debugfs_init(struct drm_minor *minor)
  2323. {
  2324. drm_debugfs_create_files(panthor_mmu_debugfs_list,
  2325. ARRAY_SIZE(panthor_mmu_debugfs_list),
  2326. minor->debugfs_root, minor);
  2327. }
  2328. #endif /* CONFIG_DEBUG_FS */
  2329. /**
  2330. * panthor_mmu_pt_cache_init() - Initialize the page table cache.
  2331. *
  2332. * Return: 0 on success, a negative error code otherwise.
  2333. */
  2334. int panthor_mmu_pt_cache_init(void)
  2335. {
  2336. pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
  2337. if (!pt_cache)
  2338. return -ENOMEM;
  2339. return 0;
  2340. }
  2341. /**
  2342. * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
  2343. */
  2344. void panthor_mmu_pt_cache_fini(void)
  2345. {
  2346. kmem_cache_destroy(pt_cache);
  2347. }
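/*
 * Lifecycle sketch (illustrative only): the page table cache is global, so it
 * is expected to be created once at module load and destroyed at module
 * unload, around the driver (un)registration. The panthor_init()/
 * panthor_exit() names below are assumptions, not definitions from this file.
 *
 *	static int __init panthor_init(void)
 *	{
 *		int ret = panthor_mmu_pt_cache_init();
 *
 *		if (ret)
 *			return ret;
 *		... register the platform driver ...
 *		return 0;
 *	}
 *
 *	static void __exit panthor_exit(void)
 *	{
 *		... unregister the platform driver ...
 *		panthor_mmu_pt_cache_fini();
 *	}
 */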