exynos_drm_ipp.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922
  1. /*
  2. * Copyright (C) 2017 Samsung Electronics Co.Ltd
  3. * Authors:
  4. * Marek Szyprowski <m.szyprowski@samsung.com>
  5. *
  6. * Exynos DRM Image Post Processing (IPP) related functions
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a
  9. * copy of this software and associated documentation files (the "Software"),
  10. * to deal in the Software without restriction, including without limitation
  11. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  12. * and/or sell copies of the Software, and to permit persons to whom the
  13. * Software is furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in
  16. * all copies or substantial portions of the Software.
  17. */
  18. #include <drm/drmP.h>
  19. #include <drm/drm_mode.h>
  20. #include <uapi/drm/exynos_drm.h>
  21. #include "exynos_drm_drv.h"
  22. #include "exynos_drm_gem.h"
  23. #include "exynos_drm_ipp.h"
/* number of registered ipp modules; also used as the next module id */
static int num_ipp;
/* list of all registered ipp modules, in registration order */
static LIST_HEAD(ipp_list);
  26. /**
  27. * exynos_drm_ipp_register - Register a new picture processor hardware module
  28. * @dev: DRM device
  29. * @ipp: ipp module to init
  30. * @funcs: callbacks for the new ipp object
  31. * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
  32. * @formats: array of supported formats
  33. * @num_formats: size of the supported formats array
  34. * @name: name (for debugging purposes)
  35. *
  36. * Initializes a ipp module.
  37. *
  38. * Returns:
  39. * Zero on success, error code on failure.
  40. */
  41. int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
  42. const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
  43. const struct exynos_drm_ipp_formats *formats,
  44. unsigned int num_formats, const char *name)
  45. {
  46. WARN_ON(!ipp);
  47. WARN_ON(!funcs);
  48. WARN_ON(!formats);
  49. WARN_ON(!num_formats);
  50. spin_lock_init(&ipp->lock);
  51. INIT_LIST_HEAD(&ipp->todo_list);
  52. init_waitqueue_head(&ipp->done_wq);
  53. ipp->dev = dev;
  54. ipp->funcs = funcs;
  55. ipp->capabilities = caps;
  56. ipp->name = name;
  57. ipp->formats = formats;
  58. ipp->num_formats = num_formats;
  59. /* ipp_list modification is serialized by component framework */
  60. list_add_tail(&ipp->head, &ipp_list);
  61. ipp->id = num_ipp++;
  62. DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
  63. return 0;
  64. }
/**
 * exynos_drm_ipp_unregister - Unregister the picture processor module
 * @dev: DRM device
 * @ipp: ipp module
 */
void exynos_drm_ipp_unregister(struct drm_device *dev,
		struct exynos_drm_ipp *ipp)
{
	/* no task may be running or queued when the module goes away */
	WARN_ON(ipp->task);
	WARN_ON(!list_empty(&ipp->todo_list));
	list_del(&ipp->head);
}
  77. /**
  78. * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
  79. * @dev: DRM device
  80. * @data: ioctl data
  81. * @file_priv: DRM file info
  82. *
  83. * Construct a list of ipp ids.
  84. *
  85. * Called by the user via ioctl.
  86. *
  87. * Returns:
  88. * Zero on success, negative errno on failure.
  89. */
  90. int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
  91. struct drm_file *file_priv)
  92. {
  93. struct drm_exynos_ioctl_ipp_get_res *resp = data;
  94. struct exynos_drm_ipp *ipp;
  95. uint32_t __user *ipp_ptr = (uint32_t __user *)
  96. (unsigned long)resp->ipp_id_ptr;
  97. unsigned int count = num_ipp, copied = 0;
  98. /*
  99. * This ioctl is called twice, once to determine how much space is
  100. * needed, and the 2nd time to fill it.
  101. */
  102. if (count && resp->count_ipps >= count) {
  103. list_for_each_entry(ipp, &ipp_list, head) {
  104. if (put_user(ipp->id, ipp_ptr + copied))
  105. return -EFAULT;
  106. copied++;
  107. }
  108. }
  109. resp->count_ipps = count;
  110. return 0;
  111. }
  112. static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
  113. {
  114. struct exynos_drm_ipp *ipp;
  115. list_for_each_entry(ipp, &ipp_list, head)
  116. if (ipp->id == id)
  117. return ipp;
  118. return NULL;
  119. }
  120. /**
  121. * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
  122. * @dev: DRM device
  123. * @data: ioctl data
  124. * @file_priv: DRM file info
  125. *
  126. * Construct a structure describing ipp module capabilities.
  127. *
  128. * Called by the user via ioctl.
  129. *
  130. * Returns:
  131. * Zero on success, negative errno on failure.
  132. */
  133. int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
  134. struct drm_file *file_priv)
  135. {
  136. struct drm_exynos_ioctl_ipp_get_caps *resp = data;
  137. void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
  138. struct exynos_drm_ipp *ipp;
  139. int i;
  140. ipp = __ipp_get(resp->ipp_id);
  141. if (!ipp)
  142. return -ENOENT;
  143. resp->ipp_id = ipp->id;
  144. resp->capabilities = ipp->capabilities;
  145. /*
  146. * This ioctl is called twice, once to determine how much space is
  147. * needed, and the 2nd time to fill it.
  148. */
  149. if (resp->formats_count >= ipp->num_formats) {
  150. for (i = 0; i < ipp->num_formats; i++) {
  151. struct drm_exynos_ipp_format tmp = {
  152. .fourcc = ipp->formats[i].fourcc,
  153. .type = ipp->formats[i].type,
  154. .modifier = ipp->formats[i].modifier,
  155. };
  156. if (copy_to_user(ptr, &tmp, sizeof(tmp)))
  157. return -EFAULT;
  158. ptr += sizeof(tmp);
  159. }
  160. }
  161. resp->formats_count = ipp->num_formats;
  162. return 0;
  163. }
  164. static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
  165. struct exynos_drm_ipp *ipp, uint32_t fourcc,
  166. uint64_t mod, unsigned int type)
  167. {
  168. int i;
  169. for (i = 0; i < ipp->num_formats; i++) {
  170. if ((ipp->formats[i].type & type) &&
  171. ipp->formats[i].fourcc == fourcc &&
  172. ipp->formats[i].modifier == mod)
  173. return &ipp->formats[i];
  174. }
  175. return NULL;
  176. }
  177. /**
  178. * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
  179. * @dev: DRM device
  180. * @data: ioctl data
  181. * @file_priv: DRM file info
  182. *
  183. * Construct a structure describing ipp module limitations for provided
  184. * picture format.
  185. *
  186. * Called by the user via ioctl.
  187. *
  188. * Returns:
  189. * Zero on success, negative errno on failure.
  190. */
  191. int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
  192. struct drm_file *file_priv)
  193. {
  194. struct drm_exynos_ioctl_ipp_get_limits *resp = data;
  195. void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
  196. const struct exynos_drm_ipp_formats *format;
  197. struct exynos_drm_ipp *ipp;
  198. if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
  199. resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
  200. return -EINVAL;
  201. ipp = __ipp_get(resp->ipp_id);
  202. if (!ipp)
  203. return -ENOENT;
  204. format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
  205. resp->type);
  206. if (!format)
  207. return -EINVAL;
  208. /*
  209. * This ioctl is called twice, once to determine how much space is
  210. * needed, and the 2nd time to fill it.
  211. */
  212. if (format->num_limits && resp->limits_count >= format->num_limits)
  213. if (copy_to_user((void __user *)ptr, format->limits,
  214. sizeof(*format->limits) * format->num_limits))
  215. return -EFAULT;
  216. resp->limits_count = format->num_limits;
  217. return 0;
  218. }
/* Pending DRM event wrapper carrying the ipp completion payload. */
struct drm_pending_exynos_ipp_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
  223. static inline struct exynos_drm_ipp_task *
  224. exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
  225. {
  226. struct exynos_drm_ipp_task *task;
  227. task = kzalloc(sizeof(*task), GFP_KERNEL);
  228. if (!task)
  229. return NULL;
  230. task->dev = ipp->dev;
  231. task->ipp = ipp;
  232. /* some defaults */
  233. task->src.rect.w = task->dst.rect.w = UINT_MAX;
  234. task->src.rect.h = task->dst.rect.h = UINT_MAX;
  235. task->transform.rotation = DRM_MODE_ROTATE_0;
  236. DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
  237. return task;
  238. }
/*
 * Map from userspace parameter id to the size of that parameter struct
 * and its destination offset inside struct exynos_drm_ipp_task; used by
 * exynos_drm_ipp_task_set() when unpacking the parameter stream.
 */
static const struct exynos_drm_param_map {
	unsigned int id;	/* DRM_EXYNOS_IPP_TASK_* parameter id */
	unsigned int size;	/* size of the userspace parameter struct */
	unsigned int offset;	/* destination offset inside the task */
} exynos_drm_ipp_params_maps[] = {
	{
		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, src.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, dst.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, src.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, dst.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_TRANSFORM,
		sizeof(struct drm_exynos_ipp_task_transform),
		offsetof(struct exynos_drm_ipp_task, transform),
	}, {
		DRM_EXYNOS_IPP_TASK_ALPHA,
		sizeof(struct drm_exynos_ipp_task_alpha),
		offsetof(struct exynos_drm_ipp_task, alpha),
	},
};
/*
 * Unpack the userspace parameter stream into @task.
 *
 * The stream is a sequence of parameter blocks, each beginning with a
 * 32-bit id; the id selects an entry of exynos_drm_ipp_params_maps which
 * supplies the expected block size and the destination offset inside
 * struct exynos_drm_ipp_task.
 *
 * Returns 0 on success, -EFAULT on a faulting user access, or -EINVAL
 * for an unknown id or a block extending past the declared stream size.
 */
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
		struct drm_exynos_ioctl_ipp_commit *arg)
{
	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
	unsigned int size = arg->params_size;
	uint32_t id;
	int i;

	while (size) {
		/* each parameter block starts with its 32-bit id */
		if (get_user(id, (uint32_t __user *)params))
			return -EFAULT;
		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
			if (map[i].id == id)
				break;
		/* reject unknown ids and blocks larger than what remains */
		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
		    map[i].size > size)
			return -EINVAL;
		if (copy_from_user((void *)task + map[i].offset, params,
				   map[i].size))
			return -EFAULT;
		params += map[i].size;
		size -= map[i].size;
	}

	DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
	return 0;
}
  298. static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
  299. struct drm_file *filp)
  300. {
  301. int ret = 0;
  302. int i;
  303. /* get GEM buffers and check their size */
  304. for (i = 0; i < buf->format->num_planes; i++) {
  305. unsigned int height = (i == 0) ? buf->buf.height :
  306. DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
  307. unsigned long size = height * buf->buf.pitch[i];
  308. struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
  309. buf->buf.gem_id[i]);
  310. if (!gem) {
  311. ret = -ENOENT;
  312. goto gem_free;
  313. }
  314. buf->exynos_gem[i] = gem;
  315. if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
  316. i++;
  317. ret = -EINVAL;
  318. goto gem_free;
  319. }
  320. buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
  321. buf->buf.offset[i];
  322. }
  323. return 0;
  324. gem_free:
  325. while (i--) {
  326. exynos_drm_gem_put(buf->exynos_gem[i]);
  327. buf->exynos_gem[i] = NULL;
  328. }
  329. return ret;
  330. }
/* Drop the GEM references taken by exynos_drm_ipp_task_setup_buffer(). */
static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
	int i;

	/* setup never completed the first plane: no references to drop */
	if (!buf->exynos_gem[0])
		return;
	for (i = 0; i < buf->format->num_planes; i++)
		exynos_drm_gem_put(buf->exynos_gem[i]);
}
/* Release a task's buffers, cancel any pending event and free the task. */
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
		struct exynos_drm_ipp_task *task)
{
	DRM_DEBUG_DRIVER("Freeing task %pK\n", task);

	exynos_drm_ipp_task_release_buf(&task->src);
	exynos_drm_ipp_task_release_buf(&task->dst);
	if (task->event)
		drm_event_cancel_free(ipp->dev, &task->event->base);
	kfree(task);
}
/* Resolved horizontal/vertical size limits for one limit category. */
struct drm_ipp_limit {
	struct drm_exynos_ipp_limit_val h;
	struct drm_exynos_ipp_limit_val v;
};

/* Which size limit applies: whole buffer, processed area, rotated area. */
enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};

/*
 * Fallback chain per size limit id: when the most specific limit is not
 * provided by the driver, the next, more general entry is consulted.
 * Rows are implicitly zero-terminated (the lookup loop stops at 0).
 */
static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
			     DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
  364. static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
  365. {
  366. if (!*ptr)
  367. *ptr = val;
  368. }
/*
 * Resolve the effective size limit @id from the driver's @limits array
 * into @res. Walks the fallback chain for @id (e.g. AREA falls back to
 * BUFFER) and, for every matching SIZE-type entry, fills in only those
 * fields of @res that are still zero, so more specific limits take
 * precedence over the more general fallbacks.
 */
static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
			     unsigned int num_limits, enum drm_ipp_size_id id,
			     struct drm_ipp_limit *res)
{
	const struct drm_exynos_ipp_limit *l = limits;
	int i = 0;

	memset(res, 0, sizeof(*res));
	for (i = 0; limit_id_fallback[id][i]; i++)
		for (l = limits; l - limits < num_limits; l++) {
			/* only SIZE entries of the current fallback kind */
			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
						     limit_id_fallback[id][i]))
				continue;
			__limit_set_val(&res->h.min, l->h.min);
			__limit_set_val(&res->h.max, l->h.max);
			__limit_set_val(&res->h.align, l->h.align);
			__limit_set_val(&res->v.min, l->v.min);
			__limit_set_val(&res->v.max, l->v.max);
			__limit_set_val(&res->v.align, l->v.align);
		}
}
  391. static inline bool __align_check(unsigned int val, unsigned int align)
  392. {
  393. if (align && (val & (align - 1))) {
  394. DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
  395. val, align);
  396. return false;
  397. }
  398. return true;
  399. }
  400. static inline bool __size_limit_check(unsigned int val,
  401. struct drm_exynos_ipp_limit_val *l)
  402. {
  403. if ((l->min && val < l->min) || (l->max && val > l->max)) {
  404. DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
  405. val, l->min, l->max);
  406. return false;
  407. }
  408. return __align_check(val, l->align);
  409. }
/*
 * Check the buffer dimensions and the processed rectangle of @buf
 * against the driver-provided size @limits.
 *
 * The full buffer (real width derived from the pitch, plus height) is
 * checked against the BUFFER limit; the rectangle against the AREA
 * limit, or the ROTATED limit when @rotate is set. With @swap set the
 * horizontal and vertical rectangle limits are exchanged.
 *
 * Returns 0 when all checks pass, -EINVAL otherwise.
 */
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
	bool rotate, bool swap)
{
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
	/* pitch is in bytes; divide by bytes-per-pixel of the first plane */
	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];

	/* no limits provided by the driver: everything is acceptable */
	if (!limits)
		return 0;

	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(real_width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

	if (swap) {
		lv = &l.h;
		lh = &l.v;
	}
	__get_size_limit(limits, num_limits, id, &l);
	if (!__size_limit_check(buf->rect.w, lh) ||
	    !__align_check(buf->rect.x, lh->align) ||
	    !__size_limit_check(buf->rect.h, lv) ||
	    !__align_check(buf->rect.y, lv->align))
		return -EINVAL;

	return 0;
}
  436. static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
  437. unsigned int min, unsigned int max)
  438. {
  439. if ((max && (dst << 16) > src * max) ||
  440. (min && (dst << 16) < src * min)) {
  441. DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
  442. src, dst,
  443. min >> 16, 100000 * (min & 0xffff) / (1 << 16),
  444. max >> 16, 100000 * (max & 0xffff) / (1 << 16));
  445. return false;
  446. }
  447. return true;
  448. }
/*
 * Check the src->dst scaling factors against the first SCALE-type entry
 * in @limits (if any). When @swap is set, destination width/height are
 * exchanged before the comparison, as are the horizontal and vertical
 * limits. Returns 0 when in range, -EINVAL otherwise.
 */
static int exynos_drm_ipp_check_scale_limits(
		struct drm_exynos_ipp_task_rect *src,
		struct drm_exynos_ipp_task_rect *dst,
		const struct drm_exynos_ipp_limit *limits,
		unsigned int num_limits, bool swap)
{
	const struct drm_exynos_ipp_limit_val *lh, *lv;
	int dw, dh;

	/* find the first scaling limit entry, if any */
	for (; num_limits; limits++, num_limits--)
		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
			break;
	/* no scaling limit provided: any ratio is acceptable */
	if (!num_limits)
		return 0;

	lh = (!swap) ? &limits->h : &limits->v;
	lv = (!swap) ? &limits->v : &limits->h;
	dw = (!swap) ? dst->w : dst->h;
	dh = (!swap) ? dst->h : dst->w;

	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
		return -EINVAL;

	return 0;
}
/*
 * Validate format, dimensions and pitches of @buf (either @src or @dst
 * of @task), then apply the driver's size and scaling limits for that
 * format. Missing pitches are filled in with the minimal legal value.
 * Returns 0 on success or a negative errno.
 */
static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
				       struct exynos_drm_ipp_buffer *buf,
				       struct exynos_drm_ipp_buffer *src,
				       struct exynos_drm_ipp_buffer *dst,
				       bool rotate, bool swap)
{
	const struct exynos_drm_ipp_formats *fmt;
	int ret, i;

	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!fmt) {
		DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
				 buf == src ? "src" : "dst");
		return -EINVAL;
	}

	/* basic checks */
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;
	/*
	 * NOTE(review): the fourcc was only matched against the driver's
	 * own format table above — presumably every table entry is a valid
	 * DRM format so drm_format_info() never returns NULL here; confirm.
	 */
	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		/* chroma planes are horizontally subsampled by hsub */
		unsigned int width = (i == 0) ? buf->buf.width :
			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		/* a zero pitch means "use the minimal legal value" */
		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

	/* pitch for additional planes must match */
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

	/* check driver limits */
	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
					       fmt->num_limits,
					       rotate,
					       buf == dst ? swap : false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						fmt->limits,
						fmt->num_limits, swap);
	return ret;
}
/*
 * Validate the whole task configuration: apply default rectangles, check
 * that the rectangles fit into their buffers, that the requested
 * crop/rotate/scale/convert operations are within the hardware
 * capabilities, and finally run the per-buffer format and limit checks.
 * Returns 0 when the task is acceptable, negative errno otherwise.
 */
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEBUG_DRIVER("Checking task %pK\n", task);

	/* UINT_MAX is the "unset" marker from exynos_drm_ipp_task_alloc() */
	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
				 task);
		return -EINVAL;
	}

	/* scaling is involved when the rotation-adjusted sizes differ */
	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
		return -EINVAL;
	}

	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
	if (ret)
		return ret;

	/* destination is not rotated itself; swap applies to its limits */
	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);

	return ret;
}
  567. static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
  568. struct drm_file *filp)
  569. {
  570. struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
  571. int ret = 0;
  572. DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);
  573. ret = exynos_drm_ipp_task_setup_buffer(src, filp);
  574. if (ret) {
  575. DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
  576. return ret;
  577. }
  578. ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
  579. if (ret) {
  580. DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
  581. return ret;
  582. }
  583. DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);
  584. return ret;
  585. }
  586. static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
  587. struct drm_file *file_priv, uint64_t user_data)
  588. {
  589. struct drm_pending_exynos_ipp_event *e = NULL;
  590. int ret;
  591. e = kzalloc(sizeof(*e), GFP_KERNEL);
  592. if (!e)
  593. return -ENOMEM;
  594. e->event.base.type = DRM_EXYNOS_IPP_EVENT;
  595. e->event.base.length = sizeof(e->event);
  596. e->event.user_data = user_data;
  597. ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
  598. &e->event.base);
  599. if (ret)
  600. goto free;
  601. task->event = e;
  602. return 0;
  603. free:
  604. kfree(e);
  605. return ret;
  606. }
/* Fill in timestamp and sequence number and deliver the completion event. */
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	task->event->event.tv_sec = now.tv_sec;
	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	/* per-module monotonically increasing event sequence number */
	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);

	drm_send_event(task->dev, &task->event->base);
}
  616. static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
  617. {
  618. int ret = task->ret;
  619. if (ret == 0 && task->event) {
  620. exynos_drm_ipp_event_send(task);
  621. /* ensure event won't be canceled on task free */
  622. task->event = NULL;
  623. }
  624. exynos_drm_ipp_task_free(task->ipp, task);
  625. return ret;
  626. }
/* Workqueue callback: clean up a finished task outside the completion path. */
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task = container_of(work,
				struct exynos_drm_ipp_task, cleanup_work);

	exynos_drm_ipp_task_cleanup(task);
}
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);

/**
 * exynos_drm_ipp_task_done - finish given task and set return code
 * @task: ipp task to finish
 * @ret: error code or 0 if operation has been performed successfully
 */
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	unsigned long flags;

	DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);

	spin_lock_irqsave(&ipp->lock, flags);
	if (ipp->task == task)
		ipp->task = NULL;
	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
	task->ret = ret;
	spin_unlock_irqrestore(&ipp->lock, flags);

	/* start the next queued task, then wake up any blocked waiter */
	exynos_drm_ipp_next_task(ipp);
	wake_up(&ipp->done_wq);

	/* async tasks have no waiter: defer cleanup to a workqueue */
	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
		schedule_work(&task->cleanup_work);
	}
}
/*
 * If the module is idle, dequeue the next task from the todo list and
 * commit it to the hardware. An immediate commit failure is reported
 * through the normal completion path (exynos_drm_ipp_task_done()).
 */
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);
	/* hardware busy or nothing queued: nothing to do */
	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;
	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);

	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}
/*
 * Queue @task and try to start it immediately.
 *
 * NOTE(review): list_add() queues at the head while
 * exynos_drm_ipp_next_task() dequeues with list_first_entry(), so
 * pending tasks are effectively served LIFO — confirm this is intended.
 */
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}
/*
 * Abort a task that its submitter gave up waiting on. Tasks that already
 * completed or were never started are cleaned up immediately; the task
 * currently running on the hardware is switched to async mode and the
 * driver's abort() callback (if any) is invoked, so cleanup happens
 * later from the completion path.
 */
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
		/* already completed task */
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
		/* task has not been scheduled for execution yet */
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
		/*
		 * currently processed task, call abort() and perform
		 * cleanup with async worker
		 */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}
/**
 * exynos_drm_ipp_commit_ioctl - perform image processing operation
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a ipp task from the set of properties provided from the user
 * and try to schedule it to framebuffer processor hardware.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_commit *arg = data;
	struct exynos_drm_ipp *ipp;
	struct exynos_drm_ipp_task *task;
	int ret = 0;

	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
		return -EINVAL;

	/* can't test and expect an event at the same time */
	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
	    (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
		return -EINVAL;

	ipp = __ipp_get(arg->ipp_id);
	if (!ipp)
		return -ENOENT;

	task = exynos_drm_ipp_task_alloc(ipp);
	if (!task)
		return -ENOMEM;

	/* unpack the userspace parameters and validate the configuration */
	ret = exynos_drm_ipp_task_set(task, arg);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_check(task);
	if (ret)
		goto free;

	/* TEST_ONLY stops here: everything checked, nothing is queued */
	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
		goto free;

	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
		ret = exynos_drm_ipp_event_create(task, file_priv,
						  arg->user_data);
		if (ret)
			goto free;
	}

	/*
	 * Queue task for processing on the hardware. task object will be
	 * then freed after exynos_drm_ipp_task_done()
	 */
	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
		DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
				 ipp->id, task);
		/* async: the completion path frees the task, not us */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		exynos_drm_ipp_schedule_task(task->ipp, task);
		ret = 0;
	} else {
		DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
				 task);
		exynos_drm_ipp_schedule_task(ipp, task);
		/* interrupted wait: hand the task over to the abort path */
		ret = wait_event_interruptible(ipp->done_wq,
					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
		if (ret)
			exynos_drm_ipp_task_abort(ipp, task);
		else
			ret = exynos_drm_ipp_task_cleanup(task);
	}

	return ret;
free:
	exynos_drm_ipp_task_free(ipp, task);

	return ret;
}