/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
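
/*
 * host1x_job_alloc() - allocate a host1x job plus its per-entry arrays
 *
 * A single kzalloc() covers the job structure and the reloc, unpin,
 * gather and DMA-address arrays, sized from num_cmdbufs and num_relocs,
 * so that one kfree() in job_free() releases everything. The overflow
 * check guards the 64-bit total against truncation of the allocation
 * size on 32-bit kernels. Returns NULL on overflow or allocation
 * failure.
 */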
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocs = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);
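
/*
 * Rough usage sketch (channel, bo and dev stand in for caller-provided
 * objects; actual submission happens elsewhere in the driver, via
 * host1x_job_submit()):
 *
 *	job = host1x_job_alloc(channel, num_cmdbufs, num_relocs);
 *	host1x_job_add_gather(job, bo, words, offset);
 *	err = host1x_job_pin(job, dev);
 *	err = host1x_job_submit(job);
 *	host1x_job_put(job);
 *
 * host1x_job_pin() unpins for itself on failure, so an error path only
 * needs host1x_job_put().
 */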

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);
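
/*
 * host1x_job_add_gather() - record one command buffer (gather) in the job
 *
 * Note that there is no bounds check here: the gathers array was sized
 * for the num_cmdbufs passed to host1x_job_alloc(), so the caller must
 * not add more gathers than that.
 */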
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset)
{
	struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

	gather->words = words;
	gather->bo = bo;
	gather->offset = offset;

	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
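
/*
 * pin_job() - pin every buffer the job references and record the
 * resulting DMA addresses
 *
 * Relocation targets are pinned first, then the gather buffers, with
 * each pin logged in job->unpins so host1x_job_unpin() can undo a
 * partial pin on error. When an IOMMU domain is attached and the
 * firewall is disabled, gathers are additionally mapped read-only into
 * IOVA space; the firewall case instead relies on copy_gathers() to
 * relocate the commands into a DMA-able bounce buffer.
 */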
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(g->bo, &sgt);

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					   iova_dma_addr(&host->iova, alloc),
					   sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}
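
/*
 * do_relocs() - patch the relocation entries of one gather
 *
 * Each relocation word in the command buffer is overwritten with the
 * pinned DMA address of its target (plus offset, shifted as requested).
 * With the firewall enabled the patch is applied to the gather copy;
 * otherwise the command buffer itself is kmapped page by page and
 * patched in place.
 */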
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;
	unsigned int i;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocs[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
					g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);

patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}
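
/*
 * The command firewall (CONFIG_TEGRA_HOST1X_FIREWALL) below parses each
 * copied gather word by word and verifies that every write to an
 * address register is backed by a matching relocation entry, so that
 * userspace cannot point the engine at arbitrary memory. check_reloc()
 * verifies that a relocation entry matches the command word currently
 * being checked.
 */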
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};
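
/*
 * check_register() - validate a single register write
 *
 * Relocations are consumed strictly in submission order: if the
 * client's is_addr_reg() callback flags the register as carrying a
 * buffer address, the next unconsumed relocation must point exactly at
 * the current command word.
 */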
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}
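
/*
 * validate() - walk one gather and dispatch per-opcode checks
 *
 * The top four bits of each word select the opcode: 0 sets the class
 * and may write a 6-bit mask of registers, 1 is an incrementing write,
 * 2 a non-incrementing write, 3 a 16-bit masked write. Opcodes 4 and 14
 * are passed through unchecked here; everything else fails validation.
 */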
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;
		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;
		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}
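
/*
 * copy_gathers() - consolidate all gathers into one contiguous
 * write-combined DMA buffer and firewall-validate them
 *
 * Only called when the firewall is enabled: hardware then executes the
 * validated copy rather than the user-supplied buffers, which closes
 * the window for modifying commands after validation.
 */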
static inline int copy_gathers(struct device *host, struct host1x_job *job,
			       struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	unsigned int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocs;
	fw.num_relocs = job->num_relocs;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools
	 * first, since waiting for the allocation here is a major
	 * performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, fall back to the generic, blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(host, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}
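
/*
 * host1x_job_pin() - prepare a job for submission
 *
 * Pins all referenced buffers, optionally copies and validates the
 * gathers (firewall), assigns each gather its DMA base address and
 * patches relocations via do_relocs(). On failure everything pinned so
 * far is released again. The wmb() makes the patched command buffers
 * visible before the job is handed to hardware.
 */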
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(host->dev, job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather BO only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets gather bases if the firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);
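
/*
 * host1x_job_unpin() - release everything pin_job() and copy_gathers()
 * acquired
 *
 * Safe to call on a partially pinned job: it walks only the first
 * num_unpins entries, tears down any IOMMU mappings, drops the BO
 * references, resets num_unpins and frees the gather copy.
 */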
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    unpin->size && host->domain) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				  iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(host->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}