drm_atomic.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors:
  25. * Rob Clark <robdclark@gmail.com>
  26. * Daniel Vetter <daniel.vetter@ffwll.ch>
  27. */
  28. #include <linux/sync_file.h>
  29. #include <drm/drm_atomic.h>
  30. #include <drm/drm_atomic_uapi.h>
  31. #include <drm/drm_blend.h>
  32. #include <drm/drm_bridge.h>
  33. #include <drm/drm_debugfs.h>
  34. #include <drm/drm_device.h>
  35. #include <drm/drm_drv.h>
  36. #include <drm/drm_file.h>
  37. #include <drm/drm_fourcc.h>
  38. #include <drm/drm_framebuffer.h>
  39. #include <drm/drm_mode.h>
  40. #include <drm/drm_print.h>
  41. #include <drm/drm_writeback.h>
  42. #include "drm_crtc_internal.h"
  43. #include "drm_internal.h"
  44. void __drm_crtc_commit_free(struct kref *kref)
  45. {
  46. struct drm_crtc_commit *commit =
  47. container_of(kref, struct drm_crtc_commit, ref);
  48. kfree(commit);
  49. }
  50. EXPORT_SYMBOL(__drm_crtc_commit_free);
  51. /**
  52. * drm_crtc_commit_wait - Waits for a commit to complete
  53. * @commit: &drm_crtc_commit to wait for
  54. *
  55. * Waits for a given &drm_crtc_commit to be programmed into the
  56. * hardware and flipped to.
  57. *
  58. * Returns:
  59. * 0 on success, a negative error code otherwise.
  60. */
  61. int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
  62. {
  63. unsigned long timeout = 10 * HZ;
  64. int ret;
  65. if (!commit)
  66. return 0;
  67. ret = wait_for_completion_timeout(&commit->hw_done, timeout);
  68. if (!ret) {
  69. drm_err(commit->crtc->dev, "hw_done timed out\n");
  70. return -ETIMEDOUT;
  71. }
  72. /*
  73. * Currently no support for overwriting flips, hence
  74. * stall for previous one to execute completely.
  75. */
  76. ret = wait_for_completion_timeout(&commit->flip_done, timeout);
  77. if (!ret) {
  78. drm_err(commit->crtc->dev, "flip_done timed out\n");
  79. return -ETIMEDOUT;
  80. }
  81. return 0;
  82. }
  83. EXPORT_SYMBOL(drm_crtc_commit_wait);
  84. /**
  85. * drm_atomic_state_default_release -
  86. * release memory initialized by drm_atomic_state_init
  87. * @state: atomic state
  88. *
  89. * Free all the memory allocated by drm_atomic_state_init.
  90. * This should only be used by drivers which are still subclassing
  91. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  92. */
  93. void drm_atomic_state_default_release(struct drm_atomic_state *state)
  94. {
  95. kfree(state->connectors);
  96. kfree(state->crtcs);
  97. kfree(state->planes);
  98. kfree(state->private_objs);
  99. }
  100. EXPORT_SYMBOL(drm_atomic_state_default_release);
  101. /**
  102. * drm_atomic_state_init - init new atomic state
  103. * @dev: DRM device
  104. * @state: atomic state
  105. *
  106. * Default implementation for filling in a new atomic state.
  107. * This should only be used by drivers which are still subclassing
  108. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  109. */
  110. int
  111. drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
  112. {
  113. kref_init(&state->ref);
  114. /* TODO legacy paths should maybe do a better job about
  115. * setting this appropriately?
  116. */
  117. state->allow_modeset = true;
  118. state->crtcs = kcalloc(dev->mode_config.num_crtc,
  119. sizeof(*state->crtcs), GFP_KERNEL);
  120. if (!state->crtcs)
  121. goto fail;
  122. state->planes = kcalloc(dev->mode_config.num_total_plane,
  123. sizeof(*state->planes), GFP_KERNEL);
  124. if (!state->planes)
  125. goto fail;
  126. /*
  127. * Because drm_atomic_state can be committed asynchronously we need our
  128. * own reference and cannot rely on the on implied by drm_file in the
  129. * ioctl call.
  130. */
  131. drm_dev_get(dev);
  132. state->dev = dev;
  133. drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
  134. return 0;
  135. fail:
  136. drm_atomic_state_default_release(state);
  137. return -ENOMEM;
  138. }
  139. EXPORT_SYMBOL(drm_atomic_state_init);
  140. /**
  141. * drm_atomic_state_alloc - allocate atomic state
  142. * @dev: DRM device
  143. *
  144. * This allocates an empty atomic state to track updates.
  145. */
  146. struct drm_atomic_state *
  147. drm_atomic_state_alloc(struct drm_device *dev)
  148. {
  149. struct drm_mode_config *config = &dev->mode_config;
  150. if (!config->funcs->atomic_state_alloc) {
  151. struct drm_atomic_state *state;
  152. state = kzalloc(sizeof(*state), GFP_KERNEL);
  153. if (!state)
  154. return NULL;
  155. if (drm_atomic_state_init(dev, state) < 0) {
  156. kfree(state);
  157. return NULL;
  158. }
  159. return state;
  160. }
  161. return config->funcs->atomic_state_alloc(dev);
  162. }
  163. EXPORT_SYMBOL(drm_atomic_state_alloc);
  164. /**
  165. * drm_atomic_state_default_clear - clear base atomic state
  166. * @state: atomic state
  167. *
  168. * Default implementation for clearing atomic state.
  169. * This should only be used by drivers which are still subclassing
  170. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  171. */
  172. void drm_atomic_state_default_clear(struct drm_atomic_state *state)
  173. {
  174. struct drm_device *dev = state->dev;
  175. struct drm_mode_config *config = &dev->mode_config;
  176. int i;
  177. drm_dbg_atomic(dev, "Clearing atomic state %p\n", state);
  178. for (i = 0; i < state->num_connector; i++) {
  179. struct drm_connector *connector = state->connectors[i].ptr;
  180. if (!connector)
  181. continue;
  182. connector->funcs->atomic_destroy_state(connector,
  183. state->connectors[i].state);
  184. state->connectors[i].ptr = NULL;
  185. state->connectors[i].state = NULL;
  186. state->connectors[i].old_state = NULL;
  187. state->connectors[i].new_state = NULL;
  188. drm_connector_put(connector);
  189. }
  190. for (i = 0; i < config->num_crtc; i++) {
  191. struct drm_crtc *crtc = state->crtcs[i].ptr;
  192. if (!crtc)
  193. continue;
  194. crtc->funcs->atomic_destroy_state(crtc,
  195. state->crtcs[i].state);
  196. state->crtcs[i].ptr = NULL;
  197. state->crtcs[i].state = NULL;
  198. state->crtcs[i].old_state = NULL;
  199. state->crtcs[i].new_state = NULL;
  200. if (state->crtcs[i].commit) {
  201. drm_crtc_commit_put(state->crtcs[i].commit);
  202. state->crtcs[i].commit = NULL;
  203. }
  204. }
  205. for (i = 0; i < config->num_total_plane; i++) {
  206. struct drm_plane *plane = state->planes[i].ptr;
  207. if (!plane)
  208. continue;
  209. plane->funcs->atomic_destroy_state(plane,
  210. state->planes[i].state);
  211. state->planes[i].ptr = NULL;
  212. state->planes[i].state = NULL;
  213. state->planes[i].old_state = NULL;
  214. state->planes[i].new_state = NULL;
  215. }
  216. for (i = 0; i < state->num_private_objs; i++) {
  217. struct drm_private_obj *obj = state->private_objs[i].ptr;
  218. obj->funcs->atomic_destroy_state(obj,
  219. state->private_objs[i].state);
  220. state->private_objs[i].ptr = NULL;
  221. state->private_objs[i].state = NULL;
  222. state->private_objs[i].old_state = NULL;
  223. state->private_objs[i].new_state = NULL;
  224. }
  225. state->num_private_objs = 0;
  226. if (state->fake_commit) {
  227. drm_crtc_commit_put(state->fake_commit);
  228. state->fake_commit = NULL;
  229. }
  230. }
  231. EXPORT_SYMBOL(drm_atomic_state_default_clear);
  232. /**
  233. * drm_atomic_state_clear - clear state object
  234. * @state: atomic state
  235. *
  236. * When the w/w mutex algorithm detects a deadlock we need to back off and drop
  237. * all locks. So someone else could sneak in and change the current modeset
  238. * configuration. Which means that all the state assembled in @state is no
  239. * longer an atomic update to the current state, but to some arbitrary earlier
  240. * state. Which could break assumptions the driver's
  241. * &drm_mode_config_funcs.atomic_check likely relies on.
  242. *
  243. * Hence we must clear all cached state and completely start over, using this
  244. * function.
  245. */
  246. void drm_atomic_state_clear(struct drm_atomic_state *state)
  247. {
  248. struct drm_device *dev = state->dev;
  249. struct drm_mode_config *config = &dev->mode_config;
  250. if (config->funcs->atomic_state_clear)
  251. config->funcs->atomic_state_clear(state);
  252. else
  253. drm_atomic_state_default_clear(state);
  254. }
  255. EXPORT_SYMBOL(drm_atomic_state_clear);
  256. /**
  257. * __drm_atomic_state_free - free all memory for an atomic state
  258. * @ref: This atomic state to deallocate
  259. *
  260. * This frees all memory associated with an atomic state, including all the
  261. * per-object state for planes, CRTCs and connectors.
  262. */
  263. void __drm_atomic_state_free(struct kref *ref)
  264. {
  265. struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
  266. struct drm_device *dev = state->dev;
  267. struct drm_mode_config *config = &dev->mode_config;
  268. drm_atomic_state_clear(state);
  269. drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state);
  270. if (config->funcs->atomic_state_free) {
  271. config->funcs->atomic_state_free(state);
  272. } else {
  273. drm_atomic_state_default_release(state);
  274. kfree(state);
  275. }
  276. drm_dev_put(dev);
  277. }
  278. EXPORT_SYMBOL(__drm_atomic_state_free);
  279. /**
  280. * drm_atomic_get_crtc_state - get CRTC state
  281. * @state: global atomic state object
  282. * @crtc: CRTC to get state object for
  283. *
  284. * This function returns the CRTC state for the given CRTC, allocating it if
  285. * needed. It will also grab the relevant CRTC lock to make sure that the state
  286. * is consistent.
  287. *
  288. * WARNING: Drivers may only add new CRTC states to a @state if
  289. * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
  290. * not created by userspace through an IOCTL call.
  291. *
  292. * Returns:
  293. * Either the allocated state or the error code encoded into the pointer. When
  294. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  295. * entire atomic sequence must be restarted. All other errors are fatal.
  296. */
  297. struct drm_crtc_state *
  298. drm_atomic_get_crtc_state(struct drm_atomic_state *state,
  299. struct drm_crtc *crtc)
  300. {
  301. int ret, index = drm_crtc_index(crtc);
  302. struct drm_crtc_state *crtc_state;
  303. WARN_ON(!state->acquire_ctx);
  304. crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
  305. if (crtc_state)
  306. return crtc_state;
  307. ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
  308. if (ret)
  309. return ERR_PTR(ret);
  310. crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
  311. if (!crtc_state)
  312. return ERR_PTR(-ENOMEM);
  313. state->crtcs[index].state = crtc_state;
  314. state->crtcs[index].old_state = crtc->state;
  315. state->crtcs[index].new_state = crtc_state;
  316. state->crtcs[index].ptr = crtc;
  317. crtc_state->state = state;
  318. drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n",
  319. crtc->base.id, crtc->name, crtc_state, state);
  320. return crtc_state;
  321. }
  322. EXPORT_SYMBOL(drm_atomic_get_crtc_state);
/*
 * drm_atomic_crtc_check - core validity checks for a CRTC state transition
 * @old_crtc_state: the currently committed CRTC state
 * @new_crtc_state: the candidate CRTC state being checked
 *
 * Returns 0 when the new state is internally consistent, -EINVAL otherwise.
 */
static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
				 const struct drm_crtc_state *new_crtc_state)
{
	struct drm_crtc *crtc = new_crtc_state->crtc;

	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	/* active (scanning out) requires enable (has a mode). */
	if (new_crtc_state->active && !new_crtc_state->enable) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] active without enabled\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger.
	 */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] enabled without mode blob\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] disabled with mode blob\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (new_crtc_state->event &&
	    !new_crtc_state->active && !old_crtc_state->active) {
		drm_dbg_atomic(crtc->dev,
			       "[CRTC:%d:%s] requesting event but off\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * drm_atomic_crtc_print_state - dump a CRTC state via a &drm_printer
 * @p: printer used for the output (debugfs, dmesg, ...)
 * @state: CRTC state to dump
 *
 * Prints the core fields, then chains into the driver's optional
 * &drm_crtc_funcs.atomic_print_state hook for driver-private state.
 */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
/*
 * drm_atomic_connector_check - core sanity checks for a connector state
 * @connector: the connector being checked
 * @state: new connector state
 *
 * Clamps @state->max_bpc and validates writeback-specific invariants
 * (framebuffer vs. CRTC vs. out-fence consistency). Returns 0 on success,
 * -EINVAL when the writeback configuration is inconsistent.
 */
static int drm_atomic_connector_check(struct drm_connector *connector,
				      struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;
	const struct drm_display_info *info = &connector->display_info;

	/* Start from the sink's reported bpc, defaulting to 8 when the
	 * display info has none, then clamp to what userspace requested.
	 */
	state->max_bpc = info->bpc ? info->bpc : 8;
	if (connector->max_bpc_property)
		state->max_bpc = min(state->max_bpc, state->max_requested_bpc);

	/* Everything below only applies to writeback connectors that
	 * actually carry a job in this update.
	 */
	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] framebuffer without CRTC\n",
			       connector->base.id, connector->name);
		return -EINVAL;
	}

	/* NOTE(review): crtc_state is only assigned when state->crtc is set.
	 * The dereference below relies on writeback_job->fb implying a CRTC
	 * (rejected above otherwise) and on that CRTC's state already being
	 * part of this update — presumably added when the CRTC was assigned
	 * to the connector; confirm against the atomic-UAPI callers.
	 */
	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		drm_dbg_atomic(connector->dev,
			       "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
			       connector->base.id, connector->name,
			       state->crtc->base.id);
		return -EINVAL;
	}

	/* A job without a framebuffer is a no-op: reject a dangling
	 * out-fence request and drop the job entirely.
	 */
	if (!writeback_job->fb) {
		if (writeback_job->out_fence) {
			drm_dbg_atomic(connector->dev,
				       "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				       connector->base.id, connector->name);
			return -EINVAL;
		}

		drm_writeback_cleanup_job(writeback_job);
		state->writeback_job = NULL;
	}

	return 0;
}
  436. /**
  437. * drm_atomic_get_plane_state - get plane state
  438. * @state: global atomic state object
  439. * @plane: plane to get state object for
  440. *
  441. * This function returns the plane state for the given plane, allocating it if
  442. * needed. It will also grab the relevant plane lock to make sure that the state
  443. * is consistent.
  444. *
  445. * Returns:
  446. * Either the allocated state or the error code encoded into the pointer. When
  447. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  448. * entire atomic sequence must be restarted. All other errors are fatal.
  449. */
  450. struct drm_plane_state *
  451. drm_atomic_get_plane_state(struct drm_atomic_state *state,
  452. struct drm_plane *plane)
  453. {
  454. int ret, index = drm_plane_index(plane);
  455. struct drm_plane_state *plane_state;
  456. WARN_ON(!state->acquire_ctx);
  457. /* the legacy pointers should never be set */
  458. WARN_ON(plane->fb);
  459. WARN_ON(plane->old_fb);
  460. WARN_ON(plane->crtc);
  461. plane_state = drm_atomic_get_existing_plane_state(state, plane);
  462. if (plane_state)
  463. return plane_state;
  464. ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
  465. if (ret)
  466. return ERR_PTR(ret);
  467. plane_state = plane->funcs->atomic_duplicate_state(plane);
  468. if (!plane_state)
  469. return ERR_PTR(-ENOMEM);
  470. state->planes[index].state = plane_state;
  471. state->planes[index].ptr = plane;
  472. state->planes[index].old_state = plane->state;
  473. state->planes[index].new_state = plane_state;
  474. plane_state->state = state;
  475. drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n",
  476. plane->base.id, plane->name, plane_state, state);
  477. if (plane_state->crtc) {
  478. struct drm_crtc_state *crtc_state;
  479. crtc_state = drm_atomic_get_crtc_state(state,
  480. plane_state->crtc);
  481. if (IS_ERR(crtc_state))
  482. return ERR_CAST(crtc_state);
  483. }
  484. return plane_state;
  485. }
  486. EXPORT_SYMBOL(drm_atomic_get_plane_state);
  487. static bool
  488. plane_switching_crtc(const struct drm_plane_state *old_plane_state,
  489. const struct drm_plane_state *new_plane_state)
  490. {
  491. if (!old_plane_state->crtc || !new_plane_state->crtc)
  492. return false;
  493. if (old_plane_state->crtc == new_plane_state->crtc)
  494. return false;
  495. /* This could be refined, but currently there's no helper or driver code
  496. * to implement direct switching of active planes nor userspace to take
  497. * advantage of more direct plane switching without the intermediate
  498. * full OFF state.
  499. */
  500. return true;
  501. }
/**
 * drm_atomic_plane_check - check plane state
 * @old_plane_state: old plane state to check
 * @new_plane_state: new plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
				  const struct drm_plane_state *new_plane_state)
{
	struct drm_plane *plane = new_plane_state->plane;
	struct drm_crtc *crtc = new_plane_state->crtc;
	const struct drm_framebuffer *fb = new_plane_state->fb;
	unsigned int fb_width, fb_height;
	struct drm_mode_rect *clips;
	uint32_t num_clips;

	/* either *both* CRTC and FB must be set, or neither */
	if (crtc && !fb) {
		drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	} else if (fb && !crtc) {
		drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
		drm_dbg_atomic(plane->dev,
			       "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
			       crtc->base.id, crtc->name,
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
			       plane->base.id, plane->name,
			       &fb->format->format, fb->modifier);
		return -EINVAL;
	}

	/* Give drivers some help against integer overflows: reject CRTC
	 * coordinates whose sum would exceed INT_MAX.
	 */
	if (new_plane_state->crtc_w > INT_MAX ||
	    new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
	    new_plane_state->crtc_h > INT_MAX ||
	    new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
			       plane->base.id, plane->name,
			       new_plane_state->crtc_w, new_plane_state->crtc_h,
			       new_plane_state->crtc_x, new_plane_state->crtc_y);
		return -ERANGE;
	}

	/* Source coordinates are in 16.16 fixed point; convert the fb size
	 * to the same scale for the bounds checks below.
	 */
	fb_width = fb->width << 16;
	fb_height = fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (new_plane_state->src_w > fb_width ||
	    new_plane_state->src_x > fb_width - new_plane_state->src_w ||
	    new_plane_state->src_h > fb_height ||
	    new_plane_state->src_y > fb_height - new_plane_state->src_h) {
		/* (x * 15625) >> 10 turns the 16-bit fraction into a
		 * decimal microposition for the debug output.
		 */
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] invalid source coordinates "
			       "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
			       plane->base.id, plane->name,
			       new_plane_state->src_w >> 16,
			       ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
			       new_plane_state->src_h >> 16,
			       ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
			       new_plane_state->src_x >> 16,
			       ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
			       new_plane_state->src_y >> 16,
			       ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
			       fb->width, fb->height);
		return -ENOSPC;
	}

	clips = __drm_plane_get_damage_clips(new_plane_state);
	num_clips = drm_plane_get_damage_clips_count(new_plane_state);

	/* Make sure damage clips are valid and inside the fb. */
	while (num_clips > 0) {
		if (clips->x1 >= clips->x2 ||
		    clips->y1 >= clips->y2 ||
		    clips->x1 < 0 ||
		    clips->y1 < 0 ||
		    clips->x2 > fb_width ||
		    clips->y2 > fb_height) {
			drm_dbg_atomic(plane->dev,
				       "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
				       plane->base.id, plane->name, clips->x1,
				       clips->y1, clips->x2, clips->y2);
			return -EINVAL;
		}
		clips++;
		num_clips--;
	}

	/* Direct CRTC-to-CRTC moves of an active plane are not supported. */
	if (plane_switching_crtc(old_plane_state, new_plane_state)) {
		drm_dbg_atomic(plane->dev,
			       "[PLANE:%d:%s] switching CRTC directly\n",
			       plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
/*
 * drm_atomic_plane_print_state - dump a plane state via a &drm_printer
 * @p: printer used for the output (debugfs, dmesg, ...)
 * @state: plane state to dump
 *
 * Prints the core fields, then chains into the driver's optional
 * &drm_plane_funcs.atomic_print_state hook for driver-private state.
 */
static void drm_atomic_plane_print_state(struct drm_printer *p,
					 const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
  634. /**
  635. * DOC: handling driver private state
  636. *
  637. * Very often the DRM objects exposed to userspace in the atomic modeset api
  638. * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
  639. * underlying hardware. Especially for any kind of shared resources (e.g. shared
  640. * clocks, scaler units, bandwidth and fifo limits shared among a group of
  641. * planes or CRTCs, and so on) it makes sense to model these as independent
  642. * objects. Drivers then need to do similar state tracking and commit ordering for
  643. * such private (since not exposed to userspace) objects as the atomic core and
  644. * helpers already provide for connectors, planes and CRTCs.
  645. *
  646. * To make this easier on drivers the atomic core provides some support to track
  647. * driver private state objects using struct &drm_private_obj, with the
  648. * associated state struct &drm_private_state.
  649. *
  650. * Similar to userspace-exposed objects, private state structures can be
  651. * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
  652. * of locking, hence drivers should not have a need to call drm_modeset_lock()
  653. * directly. Sequence of the actual hardware state commit is not handled,
  654. * drivers might need to keep track of struct drm_crtc_commit within subclassed
  655. * structure of &drm_private_state as necessary, e.g. similar to
  656. * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
  657. *
  658. * All private state structures contained in a &drm_atomic_state update can be
  659. * iterated using for_each_oldnew_private_obj_in_state(),
  660. * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
  661. * Drivers are recommended to wrap these for each type of driver private state
  662. * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
  663. * least if they want to iterate over all objects of a given type.
  664. *
  665. * An earlier way to handle driver private state was by subclassing struct
  666. * &drm_atomic_state. But since that encourages non-standard ways to implement
  667. * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
  670. */
  671. /**
  672. * drm_atomic_private_obj_init - initialize private object
  673. * @dev: DRM device this object will be attached to
  674. * @obj: private object
  675. * @state: initial private object state
  676. * @funcs: pointer to the struct of function pointers that identify the object
  677. * type
  678. *
  679. * Initialize the private object, which can be embedded into any
  680. * driver private object that needs its own atomic state.
  681. */
void
drm_atomic_private_obj_init(struct drm_device *dev,
			    struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	/* Start from a clean slate; @obj may be embedded in uninitialized memory. */
	memset(obj, 0, sizeof(*obj));

	drm_modeset_lock_init(&obj->lock);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev->mode_config.privobj_list);

	/* Link the initial state back to its owning object. */
	state->obj = obj;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
  696. /**
  697. * drm_atomic_private_obj_fini - finalize private object
  698. * @obj: private object
  699. *
  700. * Finalize the private object.
  701. */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	/* Unlink first so the object can no longer be found on the device list. */
	list_del(&obj->head);
	/* Destroy the currently installed state via the object's own vfunc. */
	obj->funcs->atomic_destroy_state(obj, obj->state);
	drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
  710. /**
  711. * drm_atomic_get_private_obj_state - get private object state
  712. * @state: global atomic state
  713. * @obj: private object to get the state for
  714. *
  715. * This function returns the private object state for the given private object,
  716. * allocating the state if needed. It will also grab the relevant private
  717. * object lock to make sure that the state is consistent.
  718. *
  719. * RETURNS:
  720. * Either the allocated state or the error code encoded into a pointer.
  721. */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i, ret;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	/* Fast path: the object is already tracked by this atomic state. */
	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	/* May return -EDEADLK, in which case the whole atomic sequence
	 * must be backed off and restarted by the caller. */
	ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	/* Grow the tracking array by one slot; krealloc preserves entries. */
	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	/* old_state points at the currently committed state, new_state at
	 * the freshly duplicated copy that check/commit will operate on. */
	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	/* Bump the count only once the slot is fully populated. */
	state->num_private_objs = num_objs;

	drm_dbg_atomic(state->dev,
		       "Added new private object %p state %p to %p\n",
		       obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
  759. /**
  760. * drm_atomic_get_old_private_obj_state
  761. * @state: global atomic state object
  762. * @obj: private_obj to grab
  763. *
  764. * This function returns the old private object state for the given private_obj,
  765. * or NULL if the private_obj is not part of the global atomic state.
  766. */
  767. struct drm_private_state *
  768. drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
  769. struct drm_private_obj *obj)
  770. {
  771. int i;
  772. for (i = 0; i < state->num_private_objs; i++)
  773. if (obj == state->private_objs[i].ptr)
  774. return state->private_objs[i].old_state;
  775. return NULL;
  776. }
  777. EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
  778. /**
  779. * drm_atomic_get_new_private_obj_state
  780. * @state: global atomic state object
  781. * @obj: private_obj to grab
  782. *
  783. * This function returns the new private object state for the given private_obj,
  784. * or NULL if the private_obj is not part of the global atomic state.
  785. */
  786. struct drm_private_state *
  787. drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
  788. struct drm_private_obj *obj)
  789. {
  790. int i;
  791. for (i = 0; i < state->num_private_objs; i++)
  792. if (obj == state->private_objs[i].ptr)
  793. return state->private_objs[i].new_state;
  794. return NULL;
  795. }
  796. EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
  797. /**
  798. * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder
  799. * @state: Atomic state
  800. * @encoder: The encoder to fetch the connector state for
  801. *
  802. * This function finds and returns the connector that was connected to @encoder
  803. * as specified by the @state.
  804. *
  805. * If there is no connector in @state which previously had @encoder connected to
  806. * it, this function will return NULL. While this may seem like an invalid use
  807. * case, it is sometimes useful to differentiate commits which had no prior
  808. * connectors attached to @encoder vs ones that did (and to inspect their
  809. * state). This is especially true in enable hooks because the pipeline has
  810. * changed.
  811. *
  812. * Returns: The old connector connected to @encoder, or NULL if the encoder is
  813. * not connected.
  814. */
  815. struct drm_connector *
  816. drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
  817. struct drm_encoder *encoder)
  818. {
  819. struct drm_connector_state *conn_state;
  820. struct drm_connector *connector;
  821. unsigned int i;
  822. for_each_old_connector_in_state(state, connector, conn_state, i) {
  823. if (conn_state->best_encoder == encoder)
  824. return connector;
  825. }
  826. return NULL;
  827. }
  828. EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
  829. /**
  830. * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder
  831. * @state: Atomic state
  832. * @encoder: The encoder to fetch the connector state for
  833. *
  834. * This function finds and returns the connector that will be connected to
  835. * @encoder as specified by the @state.
  836. *
  837. * If there is no connector in @state which will have @encoder connected to it,
  838. * this function will return NULL. While this may seem like an invalid use case,
  839. * it is sometimes useful to differentiate commits which have no connectors
  840. * attached to @encoder vs ones that do (and to inspect their state). This is
  841. * especially true in disable hooks because the pipeline will change.
  842. *
  843. * Returns: The new connector connected to @encoder, or NULL if the encoder is
  844. * not connected.
  845. */
  846. struct drm_connector *
  847. drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
  848. struct drm_encoder *encoder)
  849. {
  850. struct drm_connector_state *conn_state;
  851. struct drm_connector *connector;
  852. unsigned int i;
  853. for_each_new_connector_in_state(state, connector, conn_state, i) {
  854. if (conn_state->best_encoder == encoder)
  855. return connector;
  856. }
  857. return NULL;
  858. }
  859. EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
  860. /**
  861. * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder
  862. * @state: Atomic state
  863. * @encoder: The encoder to fetch the crtc state for
  864. *
  865. * This function finds and returns the crtc that was connected to @encoder
  866. * as specified by the @state.
  867. *
  868. * Returns: The old crtc connected to @encoder, or NULL if the encoder is
  869. * not connected.
  870. */
  871. struct drm_crtc *
  872. drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
  873. struct drm_encoder *encoder)
  874. {
  875. struct drm_connector *connector;
  876. struct drm_connector_state *conn_state;
  877. connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
  878. if (!connector)
  879. return NULL;
  880. conn_state = drm_atomic_get_old_connector_state(state, connector);
  881. if (!conn_state)
  882. return NULL;
  883. return conn_state->crtc;
  884. }
  885. EXPORT_SYMBOL(drm_atomic_get_old_crtc_for_encoder);
  886. /**
  887. * drm_atomic_get_new_crtc_for_encoder - Get new crtc for an encoder
  888. * @state: Atomic state
  889. * @encoder: The encoder to fetch the crtc state for
  890. *
  891. * This function finds and returns the crtc that will be connected to @encoder
  892. * as specified by the @state.
  893. *
  894. * Returns: The new crtc connected to @encoder, or NULL if the encoder is
  895. * not connected.
  896. */
  897. struct drm_crtc *
  898. drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
  899. struct drm_encoder *encoder)
  900. {
  901. struct drm_connector *connector;
  902. struct drm_connector_state *conn_state;
  903. connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
  904. if (!connector)
  905. return NULL;
  906. conn_state = drm_atomic_get_new_connector_state(state, connector);
  907. if (!conn_state)
  908. return NULL;
  909. return conn_state->crtc;
  910. }
  911. EXPORT_SYMBOL(drm_atomic_get_new_crtc_for_encoder);
  912. /**
  913. * drm_atomic_get_connector_state - get connector state
  914. * @state: global atomic state object
  915. * @connector: connector to get state object for
  916. *
  917. * This function returns the connector state for the given connector,
  918. * allocating it if needed. It will also grab the relevant connector lock to
  919. * make sure that the state is consistent.
  920. *
  921. * Returns:
  922. * Either the allocated state or the error code encoded into the pointer. When
  923. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  924. * entire atomic sequence must be restarted. All other errors are fatal.
  925. */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	/* All connectors are protected by the single connection_mutex. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/*
	 * Connectors can be hotplugged after the atomic state was allocated,
	 * so the per-state connectors array may need to grow here.
	 */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc_array(state->connectors, alloc,
				   sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* Zero only the newly appended slots. */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Already duplicated for this state? Then just hand it back. */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* Hold a reference for as long as @state tracks this connector. */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n",
		       connector->base.id, connector->name,
		       connector_state, state);

	/* A connector update also pulls in its CRTC's state. */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/*
 * Print the content of a connector's atomic @state into @p, one attribute
 * per line. HDMI connectors and writeback connectors get additional,
 * type-specific attributes; drivers can append more via
 * &drm_connector_funcs.atomic_print_state.
 */
static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
	drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
	drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace));

	/* HDMI-specific state only exists for HDMI-A/HDMI-B connectors. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		drm_printf(p, "\tbroadcast_rgb=%s\n",
			   drm_hdmi_connector_get_broadcast_rgb_name(state->hdmi.broadcast_rgb));
		drm_printf(p, "\tis_limited_range=%c\n", state->hdmi.is_limited_range ? 'y' : 'n');
		drm_printf(p, "\toutput_bpc=%u\n", state->hdmi.output_bpc);
		drm_printf(p, "\toutput_format=%s\n",
			   drm_hdmi_connector_get_output_format_name(state->hdmi.output_format));
		drm_printf(p, "\ttmds_char_rate=%llu\n", state->hdmi.tmds_char_rate);
	}

	/* Writeback connectors additionally carry an output framebuffer. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}
  999. /**
  1000. * drm_atomic_get_bridge_state - get bridge state
  1001. * @state: global atomic state object
  1002. * @bridge: bridge to get state object for
  1003. *
  1004. * This function returns the bridge state for the given bridge, allocating it
  1005. * if needed. It will also grab the relevant bridge lock to make sure that the
  1006. * state is consistent.
  1007. *
  1008. * Returns:
  1009. * Either the allocated state or the error code encoded into the pointer. When
  1010. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  1011. * entire atomic sequence must be restarted.
  1012. */
  1013. struct drm_bridge_state *
  1014. drm_atomic_get_bridge_state(struct drm_atomic_state *state,
  1015. struct drm_bridge *bridge)
  1016. {
  1017. struct drm_private_state *obj_state;
  1018. obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
  1019. if (IS_ERR(obj_state))
  1020. return ERR_CAST(obj_state);
  1021. return drm_priv_to_bridge_state(obj_state);
  1022. }
  1023. EXPORT_SYMBOL(drm_atomic_get_bridge_state);
  1024. /**
  1025. * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
  1026. * @state: global atomic state object
  1027. * @bridge: bridge to grab
  1028. *
  1029. * This function returns the old bridge state for the given bridge, or NULL if
  1030. * the bridge is not part of the global atomic state.
  1031. */
  1032. struct drm_bridge_state *
  1033. drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
  1034. struct drm_bridge *bridge)
  1035. {
  1036. struct drm_private_state *obj_state;
  1037. obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
  1038. if (!obj_state)
  1039. return NULL;
  1040. return drm_priv_to_bridge_state(obj_state);
  1041. }
  1042. EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
  1043. /**
  1044. * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
  1045. * @state: global atomic state object
  1046. * @bridge: bridge to grab
  1047. *
  1048. * This function returns the new bridge state for the given bridge, or NULL if
  1049. * the bridge is not part of the global atomic state.
  1050. */
  1051. struct drm_bridge_state *
  1052. drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
  1053. struct drm_bridge *bridge)
  1054. {
  1055. struct drm_private_state *obj_state;
  1056. obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
  1057. if (!obj_state)
  1058. return NULL;
  1059. return drm_priv_to_bridge_state(obj_state);
  1060. }
  1061. EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
  1062. /**
  1063. * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
  1064. * @state: atomic state
  1065. * @encoder: DRM encoder
  1066. *
  1067. * This function adds all bridges attached to @encoder. This is needed to add
  1068. * bridge states to @state and make them available when
  1069. * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
  1070. * &drm_bridge_funcs.atomic_enable(),
 * &drm_bridge_funcs.atomic_disable() and
 * &drm_bridge_funcs.atomic_post_disable() are called.
  1072. *
  1073. * Returns:
  1074. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1075. * then the w/w mutex code has detected a deadlock and the entire atomic
  1076. * sequence must be restarted. All other errors are fatal.
  1077. */
  1078. int
  1079. drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
  1080. struct drm_encoder *encoder)
  1081. {
  1082. struct drm_bridge_state *bridge_state;
  1083. struct drm_bridge *bridge;
  1084. if (!encoder)
  1085. return 0;
  1086. drm_dbg_atomic(encoder->dev,
  1087. "Adding all bridges for [encoder:%d:%s] to %p\n",
  1088. encoder->base.id, encoder->name, state);
  1089. drm_for_each_bridge_in_chain(encoder, bridge) {
  1090. /* Skip bridges that don't implement the atomic state hooks. */
  1091. if (!bridge->funcs->atomic_duplicate_state)
  1092. continue;
  1093. bridge_state = drm_atomic_get_bridge_state(state, bridge);
  1094. if (IS_ERR(bridge_state))
  1095. return PTR_ERR(bridge_state);
  1096. }
  1097. return 0;
  1098. }
  1099. EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
  1100. /**
  1101. * drm_atomic_add_affected_connectors - add connectors for CRTC
  1102. * @state: atomic state
  1103. * @crtc: DRM CRTC
  1104. *
  1105. * This function walks the current configuration and adds all connectors
  1106. * currently using @crtc to the atomic configuration @state. Note that this
  1107. * function must acquire the connection mutex. This can potentially cause
  1108. * unneeded serialization if the update is just for the planes on one CRTC. Hence
  1109. * drivers and helpers should only call this when really needed (e.g. when a
  1110. * full modeset needs to happen due to some change).
  1111. *
  1112. * Returns:
  1113. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1114. * then the w/w mutex code has detected a deadlock and the entire atomic
  1115. * sequence must be restarted. All other errors are fatal.
  1116. */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Need the CRTC state to read its connector_mask below. */
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Serializes against concurrent connector/CRTC rerouting. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	drm_dbg_atomic(crtc->dev,
		       "Adding all current connectors for [CRTC:%d:%s] to %p\n",
		       crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			/* The iterator must be ended before unwinding. */
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  1154. /**
  1155. * drm_atomic_add_affected_planes - add planes for CRTC
  1156. * @state: atomic state
  1157. * @crtc: DRM CRTC
  1158. *
  1159. * This function walks the current configuration and adds all planes
  1160. * currently used by @crtc to the atomic configuration @state. This is useful
  1161. * when an atomic commit also needs to check all currently enabled plane on
  1162. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  1163. * to avoid special code to force-enable all planes.
  1164. *
  1165. * Since acquiring a plane state will always also acquire the w/w mutex of the
  1166. * current CRTC for that plane (if there is any) adding all the plane states for
  1167. * a CRTC will not reduce parallelism of atomic updates.
  1168. *
  1169. * Returns:
  1170. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1171. * then the w/w mutex code has detected a deadlock and the entire atomic
  1172. * sequence must be restarted. All other errors are fatal.
  1173. */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	const struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_plane *plane;

	/* The CRTC itself must already have been added to @state. */
	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	drm_dbg_atomic(crtc->dev,
		       "Adding all current planes for [CRTC:%d:%s] to %p\n",
		       crtc->base.id, crtc->name, state);

	/* Pull in every plane that was enabled on @crtc before this commit. */
	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
  1194. /**
  1195. * drm_atomic_check_only - check whether a given config would work
  1196. * @state: atomic configuration to check
  1197. *
  1198. * Note that this function can return -EDEADLK if the driver needed to acquire
  1199. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1200. * backoff dance and restart. All other errors are fatal.
  1201. *
  1202. * Returns:
  1203. * 0 on success, negative error code on failure.
  1204. */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_plane_state *new_plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	unsigned int requested_crtc = 0;
	unsigned int affected_crtc = 0;
	int i, ret = 0;

	drm_dbg_atomic(dev, "checking %p\n", state);

	/*
	 * Snapshot which CRTCs this commit asks to enable, before the
	 * driver's atomic_check gets a chance to add more objects to @state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->enable)
			requested_crtc |= drm_crtc_mask(crtc);
	}

	/* Core sanity checks: planes, then CRTCs, then connectors. */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
		if (ret) {
			drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n",
				       plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
		if (ret) {
			drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n",
				       crtc->base.id, crtc->name);
			return ret;
		}
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n",
				       conn->base.id, conn->name);
			return ret;
		}
	}

	/* Driver-specific checks; may add further objects to @state. */
	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);
		if (ret) {
			drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n",
				       state, ret);
			return ret;
		}
	}

	/*
	 * Commits that did not set DRM_MODE_ATOMIC_ALLOW_MODESET must not
	 * end up requiring a full modeset on any CRTC.
	 */
	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n",
					       crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	/* Recompute the enabled-CRTC mask after the driver checks ran. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->enable)
			affected_crtc |= drm_crtc_mask(crtc);
	}

	/*
	 * For commits that allow modesets drivers can add other CRTCs to the
	 * atomic commit, e.g. when they need to reallocate global resources.
	 * This can cause spurious EBUSY, which robs compositors of a very
	 * effective sanity check for their drawing loop. Therefor only allow
	 * drivers to add unrelated CRTC states for modeset commits.
	 *
	 * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
	 * so compositors know what's going on.
	 */
	if (affected_crtc != requested_crtc) {
		drm_dbg_atomic(dev,
			       "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
			       requested_crtc, affected_crtc);
		WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
		     requested_crtc, affected_crtc);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
  1290. /**
  1291. * drm_atomic_commit - commit configuration atomically
  1292. * @state: atomic configuration to check
  1293. *
  1294. * Note that this function can return -EDEADLK if the driver needed to acquire
  1295. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1296. * backoff dance and restart. All other errors are fatal.
  1297. *
  1298. * This function will take its own reference on @state.
  1299. * Callers should always release their reference with drm_atomic_state_put().
  1300. *
  1301. * Returns:
  1302. * 0 on success, negative error code on failure.
  1303. */
  1304. int drm_atomic_commit(struct drm_atomic_state *state)
  1305. {
  1306. struct drm_mode_config *config = &state->dev->mode_config;
  1307. struct drm_printer p = drm_info_printer(state->dev->dev);
  1308. int ret;
  1309. if (drm_debug_enabled(DRM_UT_STATE))
  1310. drm_atomic_print_new_state(state, &p);
  1311. ret = drm_atomic_check_only(state);
  1312. if (ret)
  1313. return ret;
  1314. drm_dbg_atomic(state->dev, "committing %p\n", state);
  1315. return config->funcs->atomic_commit(state->dev, state, false);
  1316. }
  1317. EXPORT_SYMBOL(drm_atomic_commit);
  1318. /**
  1319. * drm_atomic_nonblocking_commit - atomic nonblocking commit
  1320. * @state: atomic configuration to check
  1321. *
  1322. * Note that this function can return -EDEADLK if the driver needed to acquire
  1323. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1324. * backoff dance and restart. All other errors are fatal.
  1325. *
  1326. * This function will take its own reference on @state.
  1327. * Callers should always release their reference with drm_atomic_state_put().
  1328. *
  1329. * Returns:
  1330. * 0 on success, negative error code on failure.
  1331. */
  1332. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  1333. {
  1334. struct drm_mode_config *config = &state->dev->mode_config;
  1335. int ret;
  1336. ret = drm_atomic_check_only(state);
  1337. if (ret)
  1338. return ret;
  1339. drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state);
  1340. return config->funcs->atomic_commit(state->dev, state, true);
  1341. }
  1342. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  1343. /* just used from drm-client and atomic-helper: */
  1344. int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
  1345. struct drm_plane_state *plane_state)
  1346. {
  1347. int ret;
  1348. ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
  1349. if (ret != 0)
  1350. return ret;
  1351. drm_atomic_set_fb_for_plane(plane_state, NULL);
  1352. plane_state->crtc_x = 0;
  1353. plane_state->crtc_y = 0;
  1354. plane_state->crtc_w = 0;
  1355. plane_state->crtc_h = 0;
  1356. plane_state->src_x = 0;
  1357. plane_state->src_y = 0;
  1358. plane_state->src_w = 0;
  1359. plane_state->src_h = 0;
  1360. return 0;
  1361. }
  1362. EXPORT_SYMBOL(__drm_atomic_helper_disable_plane);
/*
 * update_output_state - route connectors/CRTCs for a legacy set_config request
 * @state: atomic state the request is being built into
 * @set: legacy mode_set request (target crtc + desired connector list)
 *
 * Steers every connector in @set->connectors onto @set->crtc, detaches any
 * connector currently on that CRTC but not in the list, and shuts down any
 * other CRTC that ends up with no connectors at all.
 *
 * Returns 0 on success or a negative error code; -EDEADLK must be handled by
 * the caller with the usual w/w backoff dance.
 */
static int update_output_state(struct drm_atomic_state *state,
			       struct drm_mode_set *set)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int ret, i;

	/* Changing connector routing requires the global connection_mutex. */
	ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* First disable all connectors on the target crtc. */
	ret = drm_atomic_add_affected_connectors(state, set->crtc);
	if (ret)
		return ret;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		if (new_conn_state->crtc == set->crtc) {
			ret = drm_atomic_set_crtc_for_connector(new_conn_state,
								NULL);
			if (ret)
				return ret;

			/* Make sure legacy setCrtc always re-trains */
			new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
		}
	}

	/* Then set all connectors from set->connectors on the target crtc */
	for (i = 0; i < set->num_connectors; i++) {
		new_conn_state = drm_atomic_get_connector_state(state,
								set->connectors[i]);
		if (IS_ERR(new_conn_state))
			return PTR_ERR(new_conn_state);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state,
							set->crtc);
		if (ret)
			return ret;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		/*
		 * Don't update ->enable for the CRTC in the set_config request,
		 * since a mismatch would indicate a bug in the upper layers.
		 * The actual modeset code later on will catch any
		 * inconsistencies here.
		 */
		if (crtc == set->crtc)
			continue;

		/* Any other CRTC that lost all its connectors gets disabled. */
		if (!new_crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
								NULL);
			if (ret < 0)
				return ret;

			new_crtc_state->active = false;
		}
	}

	return 0;
}
/* just used from drm-client and atomic-helper: */
/*
 * Build the atomic state for a legacy drm_mode_set request: a NULL @set->mode
 * disables the CRTC and its primary plane, otherwise the mode, framebuffer
 * and viewport are programmed and the requested connectors are routed via
 * update_output_state(). Can return -EDEADLK; the caller handles the w/w
 * backoff and restart.
 */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
	int hdisplay, vdisplay;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	if (!set->mode) {
		/* Disable request: fb and connectors must not be supplied. */
		WARN_ON(set->fb);
		WARN_ON(set->num_connectors);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
		if (ret != 0)
			return ret;

		crtc_state->active = false;

		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
		if (ret != 0)
			return ret;

		drm_atomic_set_fb_for_plane(primary_state, NULL);

		goto commit;
	}

	/* Enable request: a mode needs an fb and at least one connector. */
	WARN_ON(!set->fb);
	WARN_ON(!set->num_connectors);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
	if (ret != 0)
		return ret;

	crtc_state->active = true;

	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
	if (ret != 0)
		return ret;

	drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);

	drm_atomic_set_fb_for_plane(primary_state, set->fb);

	/* Legacy set_config scans out the full mode from the primary plane. */
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
	primary_state->crtc_w = hdisplay;
	primary_state->crtc_h = vdisplay;
	/* src_* coordinates are 16.16 fixed point, hence the << 16. */
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	if (drm_rotation_90_or_270(primary_state->rotation)) {
		/* A 90/270 rotation swaps the source rectangle's w/h. */
		primary_state->src_w = vdisplay << 16;
		primary_state->src_h = hdisplay << 16;
	} else {
		primary_state->src_w = hdisplay << 16;
		primary_state->src_h = vdisplay << 16;
	}

commit:
	ret = update_output_state(state, set);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(__drm_atomic_helper_set_config);
  1479. static void drm_atomic_private_obj_print_state(struct drm_printer *p,
  1480. const struct drm_private_state *state)
  1481. {
  1482. struct drm_private_obj *obj = state->obj;
  1483. if (obj->funcs->atomic_print_state)
  1484. obj->funcs->atomic_print_state(p, state);
  1485. }
/**
 * drm_atomic_print_new_state - prints drm atomic state
 * @state: atomic configuration to check
 * @p: drm printer
 *
 * This function prints the drm atomic state snapshot using the drm printer
 * which is passed to it. This snapshot can be used for debugging purposes.
 *
 * Note that this function looks into the new state objects and hence it's not
 * safe to be used after the call to drm_atomic_helper_commit_hw_done().
 */
void drm_atomic_print_new_state(const struct drm_atomic_state *state,
				struct drm_printer *p)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_private_obj *obj;
	struct drm_private_state *obj_state;
	int i;

	/* A NULL printer is a caller bug; complain instead of crashing. */
	if (!p) {
		drm_err(state->dev, "invalid drm printer\n");
		return;
	}

	/* NOTE(review): the "checking" wording looks historical — this dumps. */
	drm_dbg_atomic(state->dev, "checking %p\n", state);

	/* Dump the new (to-be-committed) state of every object in @state. */
	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(p, connector_state);

	for_each_new_private_obj_in_state(state, obj, obj_state, i)
		drm_atomic_private_obj_print_state(p, obj_state);
}
EXPORT_SYMBOL(drm_atomic_print_new_state);
/*
 * Dump the currently-committed state of every mode object on @dev into @p.
 *
 * With @take_locks each object's lock is taken around its print (debugfs
 * path); without it the caller must either hold all modeset locks already or
 * accept racy output (error-irq debugging). No-op for non-atomic drivers.
 */
static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_private_obj *obj;

	/* Only atomic drivers have per-object ->state to dump. */
	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	/* Connectors use the safe list iterator plus the global lock. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(obj, &config->privobj_list, head) {
		if (take_locks)
			drm_modeset_lock(&obj->lock, NULL);
		drm_atomic_private_obj_print_state(p, obj->state);
		if (take_locks)
			drm_modeset_unlock(&obj->lock);
	}
}
  1565. /**
  1566. * drm_state_dump - dump entire device atomic state
  1567. * @dev: the drm device
  1568. * @p: where to print the state to
  1569. *
  1570. * Just for debugging. Drivers might want an option to dump state
  1571. * to dmesg in case of error irq's. (Hint, you probably want to
  1572. * ratelimit this!)
  1573. *
  1574. * The caller must wrap this drm_modeset_lock_all_ctx() and
  1575. * drm_modeset_drop_locks(). If this is called from error irq handler, it should
  1576. * not be enabled by default - if you are debugging errors you might
  1577. * not care that this is racey, but calling this without all modeset locks held
  1578. * is inherently unsafe.
  1579. */
  1580. void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
  1581. {
  1582. __drm_state_dump(dev, p, false);
  1583. }
  1584. EXPORT_SYMBOL(drm_state_dump);
  1585. #ifdef CONFIG_DEBUG_FS
  1586. static int drm_state_info(struct seq_file *m, void *data)
  1587. {
  1588. struct drm_debugfs_entry *entry = m->private;
  1589. struct drm_device *dev = entry->dev;
  1590. struct drm_printer p = drm_seq_file_printer(m);
  1591. __drm_state_dump(dev, &p, true);
  1592. return 0;
  1593. }
  1594. /* any use in debugfs files to dump individual planes/crtc/etc? */
  1595. static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
  1596. {"state", drm_state_info, 0},
  1597. };
  1598. void drm_atomic_debugfs_init(struct drm_device *dev)
  1599. {
  1600. drm_debugfs_add_files(dev, drm_atomic_debugfs_list,
  1601. ARRAY_SIZE(drm_atomic_debugfs_list));
  1602. }
  1603. #endif