mmu.c

// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 **************************************************************************/

#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "mmu.h"
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */
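
/*
 * Address-split helpers. The SGX MMU uses an x86-style two-level layout:
 * the top bits of a GPU virtual address index the 1024-entry page
 * directory, the next 10 bits (0x3FF mask) index a page table, and the low
 * 12 bits are the offset within a 4 KiB page. PSB_PDE_SHIFT and
 * PSB_PTE_SHIFT come from psb_reg.h.
 */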
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
        return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
        return offset >> PSB_PDE_SHIFT;
}

static inline void psb_clflush(void *addr)
{
        __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
        if (!driver->has_clflush)
                return;

        mb();
        psb_clflush(addr);
        mb();
}
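
/*
 * Invalidate the hardware directory cache after a page-directory change.
 * Caller must hold driver->sem in write mode. Pulsing _PSB_CB_CTRL_INVALDC
 * also flags the MSVDX MMU (via msvdx_mmu_invaldc) so that its view gets
 * invalidated as well.
 */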
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

        if (atomic_read(&driver->needs_tlbflush) || force) {
                uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

                /* Make sure data cache is turned off before enabling it */
                wmb();
                PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
                (void)PSB_RSGX32(PSB_CR_BIF_CTRL);
                if (driver->msvdx_mmu_invaldc)
                        atomic_set(driver->msvdx_mmu_invaldc, 1);
        }
        atomic_set(&driver->needs_tlbflush, 0);
}

#if 0
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
        down_write(&driver->sem);
        psb_mmu_flush_pd_locked(driver, force);
        up_write(&driver->sem);
}
#endif
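
/*
 * Flush the MMU: a full directory-cache invalidate if directory entries
 * have changed (needs_tlbflush), otherwise only a PTE flush. Takes
 * driver->sem in write mode itself.
 */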
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
        uint32_t val;

        down_write(&driver->sem);
        val = PSB_RSGX32(PSB_CR_BIF_CTRL);
        if (atomic_read(&driver->needs_tlbflush))
                PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
        else
                PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

        /* Make sure data cache is turned off and MMU is flushed before
           restoring bank interface control register */
        wmb();
        PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
                   PSB_CR_BIF_CTRL);
        (void)PSB_RSGX32(PSB_CR_BIF_CTRL);

        atomic_set(&driver->needs_tlbflush, 0);

        if (driver->msvdx_mmu_invaldc)
                atomic_set(driver->msvdx_mmu_invaldc, 1);

        up_write(&driver->sem);
}
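
/*
 * Bind a page directory to one of the hardware directory-list base
 * registers so that the given hw_context translates through it.
 */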
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        struct drm_device *dev = pd->driver->dev;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
        uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
                          PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

        down_write(&pd->driver->sem);
        PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
        up_write(&pd->driver->sem);
}

static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                            unsigned long end)
{
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
}

static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}
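
/*
 * Allocate a page directory along with a dummy page table and a dummy page.
 * Unless page faults are to be trapped, every directory and table entry is
 * preset to point at the dummies, so stray GPU accesses hit a harmless page
 * instead of faulting.
 */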
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        pd->p = alloc_page(GFP_DMA32);
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                                   invalid_type);
                pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                                   invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        v = kmap_local_page(pd->dummy_pt);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;

        kunmap_local(v);

        v = kmap_local_page(pd->p);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;

        kunmap_local(v);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;

        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}

static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
        __free_page(pt->p);
        kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        if (pd->hw_context != -1) {
                PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
                psb_mmu_flush_pd_locked(driver, 1);
        }

        /* Should take the spinlock here, but we don't need to do that
           since we have the semaphore in write mode. */

        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}
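
/*
 * Allocate a page table and initialise all of its entries to the page
 * directory's invalid PTE. If the directory is live on hardware and the CPU
 * has clflush, the new table is flushed out of the CPU cache before it can
 * be referenced.
 */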
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        void *v;
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        spinlock_t *lock = &pd->driver->lock;
        uint8_t *clf;
        uint32_t *ptes;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);

        v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;

        if (pd->driver->has_clflush && pd->hw_context != -1) {
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }
        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;
        pt->pd = pd;
        pt->index = 0;

        return pt;
}
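
/*
 * Look up (or allocate and hook up) the page table covering @addr and
 * return it kmapped, with the driver spinlock held. The lock is dropped
 * while a new table is allocated, so the lookup is retried if another
 * thread raced us. Release with psb_mmu_pt_unmap_unlock().
 */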
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                                    unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
                kunmap_atomic((void *) v);

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}

static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        if (!pt) {
                spin_unlock(lock);
                return NULL;
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}
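
/*
 * Unmap a page table returned by the map_lock helpers above and drop the
 * driver spinlock. A table whose use count has reached zero is unhooked
 * from the directory and freed.
 */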
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
        }
        spin_unlock(&pd->driver->lock);
}

static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
                                   uint32_t pte)
{
        pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
                                          unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}

struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        down_read(&driver->sem);
        pd = driver->default_pd;
        up_read(&driver->sem);

        return pd;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
        struct drm_device *dev = driver->dev;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

        PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
        psb_mmu_free_pagedir(driver->default_pd);
        kfree(driver);
}
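
/*
 * Set up the MMU driver: allocate the default page directory, clear any
 * pending BIF fault and, when the CPU supports clflush, work out the
 * cache-line stride used when flushing PTEs.
 */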
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
                                           int trap_pagefaults,
                                           int invalid_type,
                                           atomic_t *msvdx_mmu_invaldc)
{
        struct psb_mmu_driver *driver;
        struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

        driver = kmalloc(sizeof(*driver), GFP_KERNEL);
        if (!driver)
                return NULL;

        driver->dev = dev;
        driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                              invalid_type);
        if (!driver->default_pd)
                goto out_err1;

        spin_lock_init(&driver->lock);
        init_rwsem(&driver->sem);
        down_write(&driver->sem);
        atomic_set(&driver->needs_tlbflush, 1);
        driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;

        driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);
        PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
                   PSB_CR_BIF_CTRL);

        driver->has_clflush = 0;

        if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;

                /*
                 * clflush size is determined at kernel setup for x86_64 but not
                 * for i386. We have to do it here.
                 */

                cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
                clflush_size = ((misc >> 8) & 0xff) * 8;
                driver->has_clflush = 1;
                driver->clflush_add =
                        PAGE_SIZE * clflush_size / sizeof(uint32_t);
                driver->clflush_mask = driver->clflush_add - 1;
                driver->clflush_mask = ~driver->clflush_mask;
        }

        up_write(&driver->sem);
        return driver;

out_err1:
        kfree(driver);
        return NULL;
}
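
/*
 * clflush the CPU cache lines backing the PTEs for a (possibly tiled)
 * address range so the hardware sees the updated entries. No-op when the
 * CPU lacks clflush.
 */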
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
                               uint32_t num_pages, uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;

        if (!pd->driver->has_clflush)
                return;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;
        mb();
        for (i = 0; i < rows; ++i) {
                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
                        } while (addr += clflush_add,
                                 (addr & clflush_mask) < next);

                        psb_mmu_pt_unmap_unlock(pt);
                } while (addr = next, next != end);
                address += row_add;
        }
        mb();
}
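
/*
 * Unmap a linear range of num_pages pages starting at @address by writing
 * the invalid PTE back into each slot, then flush caches and TLB if the
 * directory is bound to a hardware context.
 */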
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return;
}
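
/*
 * Unmap a (possibly tiled) range of pages. The range layout follows the
 * same desired_tile_stride/hw_tile_stride convention as
 * psb_mmu_insert_pages().
 */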
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                          uint32_t num_pages, uint32_t desired_tile_stride,
                          uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        /* Make sure we only need to flush this processor's cache */

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_mmu_invalidate_pte(pt, addr);
                                --pt->count;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);
                address += row_add;
        }
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);
}
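
/*
 * Map num_pages physically contiguous pages, starting at @start_pfn, into a
 * linear range beginning at @address, using the caching/protection bits
 * selected by @type.
 */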
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                                unsigned long address, uint32_t num_pages,
                                int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt) {
                        ret = -ENOMEM;
                        goto out;
                }
                do {
                        pte = psb_mmu_mask_pte(start_pfn++, type);
                        psb_mmu_set_pte(pt, addr, pte);
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);
        ret = 0;

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}
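
/*
 * Map an array of (not necessarily contiguous) pages at @address. When
 * hw_tile_stride is non-zero the range is laid out as rows of
 * desired_tile_stride pages separated by hw_tile_stride pages, which is why
 * num_pages must be a multiple of the tile stride.
 */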
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride, uint32_t hw_tile_stride,
                         int type)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;
        int ret = -ENOMEM;

        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
                        return -EINVAL;
                rows = num_pages / desired_tile_stride;
        } else {
                desired_tile_stride = num_pages;
        }

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                        if (!pt)
                                goto out;
                        do {
                                pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
                                                       type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }

        ret = 0;
out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver);

        return ret;
}
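
/*
 * Translate a GPU virtual address into the pfn currently mapped there.
 * Returns 0 on success and -EINVAL if the address is not mapped; a range
 * backed by a valid dummy-page PDE reports the dummy page's pfn instead.
 */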
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}