drm_atomic.c 81 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690
  1. /*
  2. * Copyright (C) 2014 Red Hat
  3. * Copyright (C) 2014 Intel Corp.
  4. *
  5. * Permission is hereby granted, free of charge, to any person obtaining a
  6. * copy of this software and associated documentation files (the "Software"),
  7. * to deal in the Software without restriction, including without limitation
  8. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  9. * and/or sell copies of the Software, and to permit persons to whom the
  10. * Software is furnished to do so, subject to the following conditions:
  11. *
  12. * The above copyright notice and this permission notice shall be included in
  13. * all copies or substantial portions of the Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  19. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  20. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  21. * OTHER DEALINGS IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Rob Clark <robdclark@gmail.com>
  25. * Daniel Vetter <daniel.vetter@ffwll.ch>
  26. */
  27. #include <drm/drmP.h>
  28. #include <drm/drm_atomic.h>
  29. #include <drm/drm_mode.h>
  30. #include <drm/drm_print.h>
  31. #include <drm/drm_writeback.h>
  32. #include <linux/sync_file.h>
  33. #include "drm_crtc_internal.h"
  34. #include "drm_internal.h"
  35. void __drm_crtc_commit_free(struct kref *kref)
  36. {
  37. struct drm_crtc_commit *commit =
  38. container_of(kref, struct drm_crtc_commit, ref);
  39. kfree(commit);
  40. }
  41. EXPORT_SYMBOL(__drm_crtc_commit_free);
  42. /**
  43. * drm_atomic_state_default_release -
  44. * release memory initialized by drm_atomic_state_init
  45. * @state: atomic state
  46. *
  47. * Free all the memory allocated by drm_atomic_state_init.
  48. * This should only be used by drivers which are still subclassing
  49. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  50. */
  51. void drm_atomic_state_default_release(struct drm_atomic_state *state)
  52. {
  53. kfree(state->connectors);
  54. kfree(state->crtcs);
  55. kfree(state->planes);
  56. kfree(state->private_objs);
  57. }
  58. EXPORT_SYMBOL(drm_atomic_state_default_release);
  59. /**
  60. * drm_atomic_state_init - init new atomic state
  61. * @dev: DRM device
  62. * @state: atomic state
  63. *
  64. * Default implementation for filling in a new atomic state.
  65. * This should only be used by drivers which are still subclassing
  66. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  67. */
  68. int
  69. drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
  70. {
  71. kref_init(&state->ref);
  72. /* TODO legacy paths should maybe do a better job about
  73. * setting this appropriately?
  74. */
  75. state->allow_modeset = true;
  76. state->crtcs = kcalloc(dev->mode_config.num_crtc,
  77. sizeof(*state->crtcs), GFP_KERNEL);
  78. if (!state->crtcs)
  79. goto fail;
  80. state->planes = kcalloc(dev->mode_config.num_total_plane,
  81. sizeof(*state->planes), GFP_KERNEL);
  82. if (!state->planes)
  83. goto fail;
  84. state->dev = dev;
  85. DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
  86. return 0;
  87. fail:
  88. drm_atomic_state_default_release(state);
  89. return -ENOMEM;
  90. }
  91. EXPORT_SYMBOL(drm_atomic_state_init);
  92. /**
  93. * drm_atomic_state_alloc - allocate atomic state
  94. * @dev: DRM device
  95. *
  96. * This allocates an empty atomic state to track updates.
  97. */
  98. struct drm_atomic_state *
  99. drm_atomic_state_alloc(struct drm_device *dev)
  100. {
  101. struct drm_mode_config *config = &dev->mode_config;
  102. if (!config->funcs->atomic_state_alloc) {
  103. struct drm_atomic_state *state;
  104. state = kzalloc(sizeof(*state), GFP_KERNEL);
  105. if (!state)
  106. return NULL;
  107. if (drm_atomic_state_init(dev, state) < 0) {
  108. kfree(state);
  109. return NULL;
  110. }
  111. return state;
  112. }
  113. return config->funcs->atomic_state_alloc(dev);
  114. }
  115. EXPORT_SYMBOL(drm_atomic_state_alloc);
  116. /**
  117. * drm_atomic_state_default_clear - clear base atomic state
  118. * @state: atomic state
  119. *
  120. * Default implementation for clearing atomic state.
  121. * This should only be used by drivers which are still subclassing
  122. * &drm_atomic_state and haven't switched to &drm_private_state yet.
  123. */
  124. void drm_atomic_state_default_clear(struct drm_atomic_state *state)
  125. {
  126. struct drm_device *dev = state->dev;
  127. struct drm_mode_config *config = &dev->mode_config;
  128. int i;
  129. DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
  130. for (i = 0; i < state->num_connector; i++) {
  131. struct drm_connector *connector = state->connectors[i].ptr;
  132. if (!connector)
  133. continue;
  134. connector->funcs->atomic_destroy_state(connector,
  135. state->connectors[i].state);
  136. state->connectors[i].ptr = NULL;
  137. state->connectors[i].state = NULL;
  138. state->connectors[i].old_state = NULL;
  139. state->connectors[i].new_state = NULL;
  140. drm_connector_put(connector);
  141. }
  142. for (i = 0; i < config->num_crtc; i++) {
  143. struct drm_crtc *crtc = state->crtcs[i].ptr;
  144. if (!crtc)
  145. continue;
  146. crtc->funcs->atomic_destroy_state(crtc,
  147. state->crtcs[i].state);
  148. state->crtcs[i].ptr = NULL;
  149. state->crtcs[i].state = NULL;
  150. state->crtcs[i].old_state = NULL;
  151. state->crtcs[i].new_state = NULL;
  152. if (state->crtcs[i].commit) {
  153. drm_crtc_commit_put(state->crtcs[i].commit);
  154. state->crtcs[i].commit = NULL;
  155. }
  156. }
  157. for (i = 0; i < config->num_total_plane; i++) {
  158. struct drm_plane *plane = state->planes[i].ptr;
  159. if (!plane)
  160. continue;
  161. plane->funcs->atomic_destroy_state(plane,
  162. state->planes[i].state);
  163. state->planes[i].ptr = NULL;
  164. state->planes[i].state = NULL;
  165. state->planes[i].old_state = NULL;
  166. state->planes[i].new_state = NULL;
  167. }
  168. for (i = 0; i < state->num_private_objs; i++) {
  169. struct drm_private_obj *obj = state->private_objs[i].ptr;
  170. obj->funcs->atomic_destroy_state(obj,
  171. state->private_objs[i].state);
  172. state->private_objs[i].ptr = NULL;
  173. state->private_objs[i].state = NULL;
  174. state->private_objs[i].old_state = NULL;
  175. state->private_objs[i].new_state = NULL;
  176. }
  177. state->num_private_objs = 0;
  178. if (state->fake_commit) {
  179. drm_crtc_commit_put(state->fake_commit);
  180. state->fake_commit = NULL;
  181. }
  182. }
  183. EXPORT_SYMBOL(drm_atomic_state_default_clear);
  184. /**
  185. * drm_atomic_state_clear - clear state object
  186. * @state: atomic state
  187. *
  188. * When the w/w mutex algorithm detects a deadlock we need to back off and drop
  189. * all locks. So someone else could sneak in and change the current modeset
  190. * configuration. Which means that all the state assembled in @state is no
  191. * longer an atomic update to the current state, but to some arbitrary earlier
  192. * state. Which could break assumptions the driver's
  193. * &drm_mode_config_funcs.atomic_check likely relies on.
  194. *
  195. * Hence we must clear all cached state and completely start over, using this
  196. * function.
  197. */
  198. void drm_atomic_state_clear(struct drm_atomic_state *state)
  199. {
  200. struct drm_device *dev = state->dev;
  201. struct drm_mode_config *config = &dev->mode_config;
  202. if (config->funcs->atomic_state_clear)
  203. config->funcs->atomic_state_clear(state);
  204. else
  205. drm_atomic_state_default_clear(state);
  206. }
  207. EXPORT_SYMBOL(drm_atomic_state_clear);
  208. /**
  209. * __drm_atomic_state_free - free all memory for an atomic state
  210. * @ref: This atomic state to deallocate
  211. *
  212. * This frees all memory associated with an atomic state, including all the
  213. * per-object state for planes, crtcs and connectors.
  214. */
  215. void __drm_atomic_state_free(struct kref *ref)
  216. {
  217. struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
  218. struct drm_mode_config *config = &state->dev->mode_config;
  219. drm_atomic_state_clear(state);
  220. DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
  221. if (config->funcs->atomic_state_free) {
  222. config->funcs->atomic_state_free(state);
  223. } else {
  224. drm_atomic_state_default_release(state);
  225. kfree(state);
  226. }
  227. }
  228. EXPORT_SYMBOL(__drm_atomic_state_free);
  229. /**
  230. * drm_atomic_get_crtc_state - get crtc state
  231. * @state: global atomic state object
  232. * @crtc: crtc to get state object for
  233. *
  234. * This function returns the crtc state for the given crtc, allocating it if
  235. * needed. It will also grab the relevant crtc lock to make sure that the state
  236. * is consistent.
  237. *
  238. * Returns:
  239. *
  240. * Either the allocated state or the error code encoded into the pointer. When
  241. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  242. * entire atomic sequence must be restarted. All other errors are fatal.
  243. */
  244. struct drm_crtc_state *
  245. drm_atomic_get_crtc_state(struct drm_atomic_state *state,
  246. struct drm_crtc *crtc)
  247. {
  248. int ret, index = drm_crtc_index(crtc);
  249. struct drm_crtc_state *crtc_state;
  250. WARN_ON(!state->acquire_ctx);
  251. crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
  252. if (crtc_state)
  253. return crtc_state;
  254. ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
  255. if (ret)
  256. return ERR_PTR(ret);
  257. crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
  258. if (!crtc_state)
  259. return ERR_PTR(-ENOMEM);
  260. state->crtcs[index].state = crtc_state;
  261. state->crtcs[index].old_state = crtc->state;
  262. state->crtcs[index].new_state = crtc_state;
  263. state->crtcs[index].ptr = crtc;
  264. crtc_state->state = state;
  265. DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
  266. crtc->base.id, crtc->name, crtc_state, state);
  267. return crtc_state;
  268. }
  269. EXPORT_SYMBOL(drm_atomic_get_crtc_state);
  270. static void set_out_fence_for_crtc(struct drm_atomic_state *state,
  271. struct drm_crtc *crtc, s32 __user *fence_ptr)
  272. {
  273. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
  274. }
  275. static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
  276. struct drm_crtc *crtc)
  277. {
  278. s32 __user *fence_ptr;
  279. fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
  280. state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
  281. return fence_ptr;
  282. }
  283. static int set_out_fence_for_connector(struct drm_atomic_state *state,
  284. struct drm_connector *connector,
  285. s32 __user *fence_ptr)
  286. {
  287. unsigned int index = drm_connector_index(connector);
  288. if (!fence_ptr)
  289. return 0;
  290. if (put_user(-1, fence_ptr))
  291. return -EFAULT;
  292. state->connectors[index].out_fence_ptr = fence_ptr;
  293. return 0;
  294. }
  295. static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
  296. struct drm_connector *connector)
  297. {
  298. unsigned int index = drm_connector_index(connector);
  299. s32 __user *fence_ptr;
  300. fence_ptr = state->connectors[index].out_fence_ptr;
  301. state->connectors[index].out_fence_ptr = NULL;
  302. return fence_ptr;
  303. }
  304. /**
  305. * drm_atomic_set_mode_for_crtc - set mode for CRTC
  306. * @state: the CRTC whose incoming state to update
  307. * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
  308. *
  309. * Set a mode (originating from the kernel) on the desired CRTC state and update
  310. * the enable property.
  311. *
  312. * RETURNS:
  313. * Zero on success, error code on failure. Cannot return -EDEADLK.
  314. */
  315. int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
  316. const struct drm_display_mode *mode)
  317. {
  318. struct drm_crtc *crtc = state->crtc;
  319. struct drm_mode_modeinfo umode;
  320. /* Early return for no change. */
  321. if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
  322. return 0;
  323. drm_property_blob_put(state->mode_blob);
  324. state->mode_blob = NULL;
  325. if (mode) {
  326. drm_mode_convert_to_umode(&umode, mode);
  327. state->mode_blob =
  328. drm_property_create_blob(state->crtc->dev,
  329. sizeof(umode),
  330. &umode);
  331. if (IS_ERR(state->mode_blob))
  332. return PTR_ERR(state->mode_blob);
  333. drm_mode_copy(&state->mode, mode);
  334. state->enable = true;
  335. DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
  336. mode->name, crtc->base.id, crtc->name, state);
  337. } else {
  338. memset(&state->mode, 0, sizeof(state->mode));
  339. state->enable = false;
  340. DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
  341. crtc->base.id, crtc->name, state);
  342. }
  343. return 0;
  344. }
  345. EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
  346. /**
  347. * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
  348. * @state: the CRTC whose incoming state to update
  349. * @blob: pointer to blob property to use for mode
  350. *
  351. * Set a mode (originating from a blob property) on the desired CRTC state.
  352. * This function will take a reference on the blob property for the CRTC state,
  353. * and release the reference held on the state's existing mode property, if any
  354. * was set.
  355. *
  356. * RETURNS:
  357. * Zero on success, error code on failure. Cannot return -EDEADLK.
  358. */
  359. int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
  360. struct drm_property_blob *blob)
  361. {
  362. struct drm_crtc *crtc = state->crtc;
  363. if (blob == state->mode_blob)
  364. return 0;
  365. drm_property_blob_put(state->mode_blob);
  366. state->mode_blob = NULL;
  367. memset(&state->mode, 0, sizeof(state->mode));
  368. if (blob) {
  369. int ret;
  370. if (blob->length != sizeof(struct drm_mode_modeinfo)) {
  371. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
  372. crtc->base.id, crtc->name,
  373. blob->length);
  374. return -EINVAL;
  375. }
  376. ret = drm_mode_convert_umode(crtc->dev,
  377. &state->mode, blob->data);
  378. if (ret) {
  379. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
  380. crtc->base.id, crtc->name,
  381. ret, drm_get_mode_status_name(state->mode.status));
  382. drm_mode_debug_printmodeline(&state->mode);
  383. return -EINVAL;
  384. }
  385. state->mode_blob = drm_property_blob_get(blob);
  386. state->enable = true;
  387. DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
  388. state->mode.name, crtc->base.id, crtc->name,
  389. state);
  390. } else {
  391. state->enable = false;
  392. DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
  393. crtc->base.id, crtc->name, state);
  394. }
  395. return 0;
  396. }
  397. EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
  398. /**
  399. * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
  400. * @dev: DRM device
  401. * @blob: a pointer to the member blob to be replaced
  402. * @blob_id: ID of the new blob
  403. * @expected_size: total expected size of the blob data (in bytes)
  404. * @expected_elem_size: expected element size of the blob data (in bytes)
  405. * @replaced: did the blob get replaced?
  406. *
  407. * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
  408. * @blob becomes NULL.
  409. *
  410. * If @expected_size is positive the new blob length is expected to be equal
  411. * to @expected_size bytes. If @expected_elem_size is positive the new blob
  412. * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
  413. * an error is returned.
  414. *
  415. * @replaced will indicate to the caller whether the blob was replaced or not.
  416. * If the old and new blobs were in fact the same blob @replaced will be false
  417. * otherwise it will be true.
  418. *
  419. * RETURNS:
  420. * Zero on success, error code on failure.
  421. */
  422. static int
  423. drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
  424. struct drm_property_blob **blob,
  425. uint64_t blob_id,
  426. ssize_t expected_size,
  427. ssize_t expected_elem_size,
  428. bool *replaced)
  429. {
  430. struct drm_property_blob *new_blob = NULL;
  431. if (blob_id != 0) {
  432. new_blob = drm_property_lookup_blob(dev, blob_id);
  433. if (new_blob == NULL)
  434. return -EINVAL;
  435. if (expected_size > 0 &&
  436. new_blob->length != expected_size) {
  437. drm_property_blob_put(new_blob);
  438. return -EINVAL;
  439. }
  440. if (expected_elem_size > 0 &&
  441. new_blob->length % expected_elem_size != 0) {
  442. drm_property_blob_put(new_blob);
  443. return -EINVAL;
  444. }
  445. }
  446. *replaced |= drm_property_replace_blob(blob, new_blob);
  447. drm_property_blob_put(new_blob);
  448. return 0;
  449. }
  450. /**
  451. * drm_atomic_crtc_set_property - set property on CRTC
  452. * @crtc: the drm CRTC to set a property on
  453. * @state: the state object to update with the new property value
  454. * @property: the property to set
  455. * @val: the new property value
  456. *
  457. * This function handles generic/core properties and calls out to driver's
  458. * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
  459. * consistent behavior you must call this function rather than the driver hook
  460. * directly.
  461. *
  462. * RETURNS:
  463. * Zero on success, error code on failure
  464. */
  465. int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
  466. struct drm_crtc_state *state, struct drm_property *property,
  467. uint64_t val)
  468. {
  469. struct drm_device *dev = crtc->dev;
  470. struct drm_mode_config *config = &dev->mode_config;
  471. bool replaced = false;
  472. int ret;
  473. if (property == config->prop_active)
  474. state->active = val;
  475. else if (property == config->prop_mode_id) {
  476. struct drm_property_blob *mode =
  477. drm_property_lookup_blob(dev, val);
  478. ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
  479. drm_property_blob_put(mode);
  480. return ret;
  481. } else if (property == config->degamma_lut_property) {
  482. ret = drm_atomic_replace_property_blob_from_id(dev,
  483. &state->degamma_lut,
  484. val,
  485. -1, sizeof(struct drm_color_lut),
  486. &replaced);
  487. state->color_mgmt_changed |= replaced;
  488. return ret;
  489. } else if (property == config->ctm_property) {
  490. ret = drm_atomic_replace_property_blob_from_id(dev,
  491. &state->ctm,
  492. val,
  493. sizeof(struct drm_color_ctm), -1,
  494. &replaced);
  495. state->color_mgmt_changed |= replaced;
  496. return ret;
  497. } else if (property == config->gamma_lut_property) {
  498. ret = drm_atomic_replace_property_blob_from_id(dev,
  499. &state->gamma_lut,
  500. val,
  501. -1, sizeof(struct drm_color_lut),
  502. &replaced);
  503. state->color_mgmt_changed |= replaced;
  504. return ret;
  505. } else if (property == config->prop_out_fence_ptr) {
  506. s32 __user *fence_ptr = u64_to_user_ptr(val);
  507. if (!fence_ptr)
  508. return 0;
  509. if (put_user(-1, fence_ptr))
  510. return -EFAULT;
  511. set_out_fence_for_crtc(state->state, crtc, fence_ptr);
  512. } else if (crtc->funcs->atomic_set_property) {
  513. return crtc->funcs->atomic_set_property(crtc, state, property, val);
  514. } else {
  515. DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]]\n",
  516. crtc->base.id, crtc->name,
  517. property->base.id, property->name);
  518. return -EINVAL;
  519. }
  520. return 0;
  521. }
  522. EXPORT_SYMBOL(drm_atomic_crtc_set_property);
  523. /**
  524. * drm_atomic_crtc_get_property - get property value from CRTC state
  525. * @crtc: the drm CRTC to set a property on
  526. * @state: the state object to get the property value from
  527. * @property: the property to set
  528. * @val: return location for the property value
  529. *
  530. * This function handles generic/core properties and calls out to driver's
  531. * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
  532. * consistent behavior you must call this function rather than the driver hook
  533. * directly.
  534. *
  535. * RETURNS:
  536. * Zero on success, error code on failure
  537. */
  538. static int
  539. drm_atomic_crtc_get_property(struct drm_crtc *crtc,
  540. const struct drm_crtc_state *state,
  541. struct drm_property *property, uint64_t *val)
  542. {
  543. struct drm_device *dev = crtc->dev;
  544. struct drm_mode_config *config = &dev->mode_config;
  545. if (property == config->prop_active)
  546. *val = state->active;
  547. else if (property == config->prop_mode_id)
  548. *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
  549. else if (property == config->degamma_lut_property)
  550. *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
  551. else if (property == config->ctm_property)
  552. *val = (state->ctm) ? state->ctm->base.id : 0;
  553. else if (property == config->gamma_lut_property)
  554. *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
  555. else if (property == config->prop_out_fence_ptr)
  556. *val = 0;
  557. else if (crtc->funcs->atomic_get_property)
  558. return crtc->funcs->atomic_get_property(crtc, state, property, val);
  559. else
  560. return -EINVAL;
  561. return 0;
  562. }
/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	/* A CRTC cannot be active without also being enabled. */
	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}
/* Dump a CRTC state to @p, one "\t<field>=<value>" line per field, for
 * atomic debug output. */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	/* Let the driver dump any driver-private state extensions. */
	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
  639. /**
  640. * drm_atomic_connector_check - check connector state
  641. * @connector: connector to check
  642. * @state: connector state to check
  643. *
  644. * Provides core sanity checks for connector state.
  645. *
  646. * RETURNS:
  647. * Zero on success, error code on failure
  648. */
  649. static int drm_atomic_connector_check(struct drm_connector *connector,
  650. struct drm_connector_state *state)
  651. {
  652. struct drm_crtc_state *crtc_state;
  653. struct drm_writeback_job *writeback_job = state->writeback_job;
  654. if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
  655. return 0;
  656. if (writeback_job->fb && !state->crtc) {
  657. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
  658. connector->base.id, connector->name);
  659. return -EINVAL;
  660. }
  661. if (state->crtc)
  662. crtc_state = drm_atomic_get_existing_crtc_state(state->state,
  663. state->crtc);
  664. if (writeback_job->fb && !crtc_state->active) {
  665. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
  666. connector->base.id, connector->name,
  667. state->crtc->base.id);
  668. return -EINVAL;
  669. }
  670. if (writeback_job->out_fence && !writeback_job->fb) {
  671. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
  672. connector->base.id, connector->name);
  673. return -EINVAL;
  674. }
  675. return 0;
  676. }
  677. /**
  678. * drm_atomic_get_plane_state - get plane state
  679. * @state: global atomic state object
  680. * @plane: plane to get state object for
  681. *
  682. * This function returns the plane state for the given plane, allocating it if
  683. * needed. It will also grab the relevant plane lock to make sure that the state
  684. * is consistent.
  685. *
  686. * Returns:
  687. *
  688. * Either the allocated state or the error code encoded into the pointer. When
  689. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  690. * entire atomic sequence must be restarted. All other errors are fatal.
  691. */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	/* Return the already-duplicated state if this plane is in @state. */
	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	/* May return -EDEADLK, in which case the caller must restart the
	 * whole atomic sequence. */
	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	/* Record old/new pointers so the old/new state iterators work. */
	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	/* A plane bound to a CRTC pulls that CRTC's state in as well. */
	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
  729. /**
  730. * drm_atomic_plane_set_property - set property on plane
  731. * @plane: the drm plane to set a property on
  732. * @state: the state object to update with the new property value
  733. * @property: the property to set
  734. * @val: the new property value
  735. *
  736. * This function handles generic/core properties and calls out to driver's
  737. * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
  738. * consistent behavior you must call this function rather than the driver hook
  739. * directly.
  740. *
  741. * RETURNS:
  742. * Zero on success, error code on failure
  743. */
  744. static int drm_atomic_plane_set_property(struct drm_plane *plane,
  745. struct drm_plane_state *state, struct drm_property *property,
  746. uint64_t val)
  747. {
  748. struct drm_device *dev = plane->dev;
  749. struct drm_mode_config *config = &dev->mode_config;
  750. if (property == config->prop_fb_id) {
  751. struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
  752. drm_atomic_set_fb_for_plane(state, fb);
  753. if (fb)
  754. drm_framebuffer_put(fb);
  755. } else if (property == config->prop_in_fence_fd) {
  756. if (state->fence)
  757. return -EINVAL;
  758. if (U642I64(val) == -1)
  759. return 0;
  760. state->fence = sync_file_get_fence(val);
  761. if (!state->fence)
  762. return -EINVAL;
  763. } else if (property == config->prop_crtc_id) {
  764. struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
  765. return drm_atomic_set_crtc_for_plane(state, crtc);
  766. } else if (property == config->prop_crtc_x) {
  767. state->crtc_x = U642I64(val);
  768. } else if (property == config->prop_crtc_y) {
  769. state->crtc_y = U642I64(val);
  770. } else if (property == config->prop_crtc_w) {
  771. state->crtc_w = val;
  772. } else if (property == config->prop_crtc_h) {
  773. state->crtc_h = val;
  774. } else if (property == config->prop_src_x) {
  775. state->src_x = val;
  776. } else if (property == config->prop_src_y) {
  777. state->src_y = val;
  778. } else if (property == config->prop_src_w) {
  779. state->src_w = val;
  780. } else if (property == config->prop_src_h) {
  781. state->src_h = val;
  782. } else if (property == plane->alpha_property) {
  783. state->alpha = val;
  784. } else if (property == plane->rotation_property) {
  785. if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
  786. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
  787. plane->base.id, plane->name, val);
  788. return -EINVAL;
  789. }
  790. state->rotation = val;
  791. } else if (property == plane->zpos_property) {
  792. state->zpos = val;
  793. } else if (property == plane->color_encoding_property) {
  794. state->color_encoding = val;
  795. } else if (property == plane->color_range_property) {
  796. state->color_range = val;
  797. } else if (plane->funcs->atomic_set_property) {
  798. return plane->funcs->atomic_set_property(plane, state,
  799. property, val);
  800. } else {
  801. DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]]\n",
  802. plane->base.id, plane->name,
  803. property->base.id, property->name);
  804. return -EINVAL;
  805. }
  806. return 0;
  807. }
  808. /**
  809. * drm_atomic_plane_get_property - get property value from plane state
  810. * @plane: the drm plane to set a property on
  811. * @state: the state object to get the property value from
  812. * @property: the property to set
  813. * @val: return location for the property value
  814. *
  815. * This function handles generic/core properties and calls out to driver's
  816. * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
  817. * consistent behavior you must call this function rather than the driver hook
  818. * directly.
  819. *
  820. * RETURNS:
  821. * Zero on success, error code on failure
  822. */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	/* Core properties shared by all planes. */
	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		/* In-fences are write-only; they always read back as -1. */
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	/* Optional per-plane properties. */
	} else if (property == plane->alpha_property) {
		*val = state->alpha;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (property == plane->color_encoding_property) {
		*val = state->color_encoding;
	} else if (property == plane->color_range_property) {
		*val = state->color_range;
	} else if (plane->funcs->atomic_get_property) {
		/* Driver-private property. */
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
  869. static bool
  870. plane_switching_crtc(struct drm_atomic_state *state,
  871. struct drm_plane *plane,
  872. struct drm_plane_state *plane_state)
  873. {
  874. if (!plane->state->crtc || !plane_state->crtc)
  875. return false;
  876. if (plane->state->crtc == plane_state->crtc)
  877. return false;
  878. /* This could be refined, but currently there's no helper or driver code
  879. * to implement direct switching of active planes nor userspace to take
  880. * advantage of more direct plane switching without the intermediate
  881. * full OFF state.
  882. */
  883. return true;
  884. }
  885. /**
  886. * drm_atomic_plane_check - check plane state
  887. * @plane: plane to check
  888. * @state: plane state to check
  889. *
  890. * Provides core sanity checks for plane state.
  891. *
  892. * RETURNS:
  893. * Zero on success, error code on failure
  894. */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	/* Source coordinates are in 16.16 fixed point. */
	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		/* (frac * 15625) >> 10 == frac * 10^6 / 2^16, i.e. the
		 * 16-bit fraction rendered as a 6-digit decimal. */
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	/* Moving an active plane between CRTCs in one go is unsupported. */
	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
/* Dump a plane state to @p for atomic debug output. */
static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	/* Let the driver dump any driver-private state extensions. */
	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}
  989. /**
  990. * DOC: handling driver private state
  991. *
  992. * Very often the DRM objects exposed to userspace in the atomic modeset api
  993. * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
  994. * underlying hardware. Especially for any kind of shared resources (e.g. shared
  995. * clocks, scaler units, bandwidth and fifo limits shared among a group of
  996. * planes or CRTCs, and so on) it makes sense to model these as independent
  997. * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
  999. * helpers already provide for connectors, planes and CRTCs.
  1000. *
  1001. * To make this easier on drivers the atomic core provides some support to track
  1002. * driver private state objects using struct &drm_private_obj, with the
  1003. * associated state struct &drm_private_state.
  1004. *
  1005. * Similar to userspace-exposed objects, private state structures can be
  1006. * acquired by calling drm_atomic_get_private_obj_state(). Since this function
  1007. * does not take care of locking, drivers should wrap it for each type of
  1008. * private state object they have with the required call to drm_modeset_lock()
  1009. * for the corresponding &drm_modeset_lock.
  1010. *
  1011. * All private state structures contained in a &drm_atomic_state update can be
  1012. * iterated using for_each_oldnew_private_obj_in_state(),
  1013. * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
  1014. * Drivers are recommended to wrap these for each type of driver private state
  1015. * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
  1016. * least if they want to iterate over all objects of a given type.
  1017. *
  1018. * An earlier way to handle driver private state was by subclassing struct
  1019. * &drm_atomic_state. But since that encourages non-standard ways to implement
  1020. * the check/commit split atomic requires (by using e.g. "check and rollback or
  1021. * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
  1023. */
  1024. /**
  1025. * drm_atomic_private_obj_init - initialize private object
  1026. * @obj: private object
  1027. * @state: initial private object state
  1028. * @funcs: pointer to the struct of function pointers that identify the object
  1029. * type
  1030. *
  1031. * Initialize the private object, which can be embedded into any
  1032. * driver private object that needs its own atomic state.
  1033. */
  1034. void
  1035. drm_atomic_private_obj_init(struct drm_private_obj *obj,
  1036. struct drm_private_state *state,
  1037. const struct drm_private_state_funcs *funcs)
  1038. {
  1039. memset(obj, 0, sizeof(*obj));
  1040. obj->state = state;
  1041. obj->funcs = funcs;
  1042. }
  1043. EXPORT_SYMBOL(drm_atomic_private_obj_init);
  1044. /**
  1045. * drm_atomic_private_obj_fini - finalize private object
  1046. * @obj: private object
  1047. *
  1048. * Finalize the private object.
  1049. */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	/* Destroy the object's current state through its own vtable. */
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
  1056. /**
  1057. * drm_atomic_get_private_obj_state - get private object state
  1058. * @state: global atomic state
  1059. * @obj: private object to get the state for
  1060. *
  1061. * This function returns the private object state for the given private object,
  1062. * allocating the state if needed. It does not grab any locks as the caller is
  1063. * expected to care of any required locking.
  1064. *
  1065. * RETURNS:
  1066. *
  1067. * Either the allocated state or the error code encoded into a pointer.
  1068. */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	/* Return the already-duplicated state if @obj is part of @state. */
	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	/* Grow the private-object array by one entry. */
	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	/* Fill in the old/new pointers used by the private-obj iterators;
	 * num_private_objs is only bumped once the entry is fully valid. */
	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
  1102. /**
  1103. * drm_atomic_get_connector_state - get connector state
  1104. * @state: global atomic state object
  1105. * @connector: connector to get state object for
  1106. *
  1107. * This function returns the connector state for the given connector,
  1108. * allocating it if needed. It will also grab the relevant connector lock to
  1109. * make sure that the state is consistent.
  1110. *
  1111. * Returns:
  1112. *
  1113. * Either the allocated state or the error code encoded into the pointer. When
  1114. * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
  1115. * entire atomic sequence must be restarted. All other errors are fatal.
  1116. */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	/* May return -EDEADLK; the caller must then restart the whole
	 * atomic sequence. */
	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	/* The connectors array was sized when @state was allocated; a
	 * connector with a larger index requires growing it here. */
	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		/* Zero only the newly-added tail entries. */
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	/* Return the already-duplicated state if present. */
	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	/* Hold a connector reference for the lifetime of @state. */
	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	/* A connector bound to a CRTC pulls that CRTC's state in too. */
	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
  1164. /**
  1165. * drm_atomic_connector_set_property - set property on connector.
  1166. * @connector: the drm connector to set a property on
  1167. * @state: the state object to update with the new property value
  1168. * @property: the property to set
  1169. * @val: the new property value
  1170. *
  1171. * This function handles generic/core properties and calls out to driver's
  1172. * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
  1173. * consistent behavior you must call this function rather than the driver hook
  1174. * directly.
  1175. *
  1176. * RETURNS:
  1177. * Zero on success, error code on failure
  1178. */
  1179. static int drm_atomic_connector_set_property(struct drm_connector *connector,
  1180. struct drm_connector_state *state, struct drm_property *property,
  1181. uint64_t val)
  1182. {
  1183. struct drm_device *dev = connector->dev;
  1184. struct drm_mode_config *config = &dev->mode_config;
  1185. if (property == config->prop_crtc_id) {
  1186. struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
  1187. return drm_atomic_set_crtc_for_connector(state, crtc);
  1188. } else if (property == config->dpms_property) {
  1189. /* setting DPMS property requires special handling, which
  1190. * is done in legacy setprop path for us. Disallow (for
  1191. * now?) atomic writes to DPMS property:
  1192. */
  1193. return -EINVAL;
  1194. } else if (property == config->tv_select_subconnector_property) {
  1195. state->tv.subconnector = val;
  1196. } else if (property == config->tv_left_margin_property) {
  1197. state->tv.margins.left = val;
  1198. } else if (property == config->tv_right_margin_property) {
  1199. state->tv.margins.right = val;
  1200. } else if (property == config->tv_top_margin_property) {
  1201. state->tv.margins.top = val;
  1202. } else if (property == config->tv_bottom_margin_property) {
  1203. state->tv.margins.bottom = val;
  1204. } else if (property == config->tv_mode_property) {
  1205. state->tv.mode = val;
  1206. } else if (property == config->tv_brightness_property) {
  1207. state->tv.brightness = val;
  1208. } else if (property == config->tv_contrast_property) {
  1209. state->tv.contrast = val;
  1210. } else if (property == config->tv_flicker_reduction_property) {
  1211. state->tv.flicker_reduction = val;
  1212. } else if (property == config->tv_overscan_property) {
  1213. state->tv.overscan = val;
  1214. } else if (property == config->tv_saturation_property) {
  1215. state->tv.saturation = val;
  1216. } else if (property == config->tv_hue_property) {
  1217. state->tv.hue = val;
  1218. } else if (property == config->link_status_property) {
  1219. /* Never downgrade from GOOD to BAD on userspace's request here,
  1220. * only hw issues can do that.
  1221. *
  1222. * For an atomic property the userspace doesn't need to be able
  1223. * to understand all the properties, but needs to be able to
  1224. * restore the state it wants on VT switch. So if the userspace
  1225. * tries to change the link_status from GOOD to BAD, driver
  1226. * silently rejects it and returns a 0. This prevents userspace
  1227. * from accidently breaking the display when it restores the
  1228. * state.
  1229. */
  1230. if (state->link_status != DRM_LINK_STATUS_GOOD)
  1231. state->link_status = val;
  1232. } else if (property == config->aspect_ratio_property) {
  1233. state->picture_aspect_ratio = val;
  1234. } else if (property == config->content_type_property) {
  1235. state->content_type = val;
  1236. } else if (property == connector->scaling_mode_property) {
  1237. state->scaling_mode = val;
  1238. } else if (property == connector->content_protection_property) {
  1239. if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
  1240. DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
  1241. return -EINVAL;
  1242. }
  1243. state->content_protection = val;
  1244. } else if (property == config->writeback_fb_id_property) {
  1245. struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
  1246. int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
  1247. if (fb)
  1248. drm_framebuffer_put(fb);
  1249. return ret;
  1250. } else if (property == config->writeback_out_fence_ptr_property) {
  1251. s32 __user *fence_ptr = u64_to_user_ptr(val);
  1252. return set_out_fence_for_connector(state->state, connector,
  1253. fence_ptr);
  1254. } else if (connector->funcs->atomic_set_property) {
  1255. return connector->funcs->atomic_set_property(connector,
  1256. state, property, val);
  1257. } else {
  1258. DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]]\n",
  1259. connector->base.id, connector->name,
  1260. property->base.id, property->name);
  1261. return -EINVAL;
  1262. }
  1263. return 0;
  1264. }
/* Dump a connector state to @p for atomic debug output. */
static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	/* Writeback connectors additionally report the queued job's fb. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}
/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get the property value from
 * @state: the state object to get the property value from
 * @property: the property to read
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	/* Core properties are decoded here; only unknown ones reach the driver. */
	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		/* DPMS is tracked on the connector itself, not in atomic state. */
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == config->content_type_property) {
		*val = state->content_type;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (property == connector->content_protection_property) {
		*val = state->content_protection;
	} else if (property == config->writeback_fb_id_property) {
		/* Writeback framebuffer is one-shot, write and forget */
		*val = 0;
	} else if (property == config->writeback_out_fence_ptr_property) {
		/* The out-fence pointer is likewise consumed at commit time. */
		*val = 0;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
  1350. int drm_atomic_get_property(struct drm_mode_object *obj,
  1351. struct drm_property *property, uint64_t *val)
  1352. {
  1353. struct drm_device *dev = property->dev;
  1354. int ret;
  1355. switch (obj->type) {
  1356. case DRM_MODE_OBJECT_CONNECTOR: {
  1357. struct drm_connector *connector = obj_to_connector(obj);
  1358. WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
  1359. ret = drm_atomic_connector_get_property(connector,
  1360. connector->state, property, val);
  1361. break;
  1362. }
  1363. case DRM_MODE_OBJECT_CRTC: {
  1364. struct drm_crtc *crtc = obj_to_crtc(obj);
  1365. WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
  1366. ret = drm_atomic_crtc_get_property(crtc,
  1367. crtc->state, property, val);
  1368. break;
  1369. }
  1370. case DRM_MODE_OBJECT_PLANE: {
  1371. struct drm_plane *plane = obj_to_plane(obj);
  1372. WARN_ON(!drm_modeset_is_locked(&plane->mutex));
  1373. ret = drm_atomic_plane_get_property(plane,
  1374. plane->state, property, val);
  1375. break;
  1376. }
  1377. default:
  1378. ret = -EINVAL;
  1379. break;
  1380. }
  1381. return ret;
  1382. }
  1383. /**
  1384. * drm_atomic_set_crtc_for_plane - set crtc for plane
  1385. * @plane_state: the plane whose incoming state to update
  1386. * @crtc: crtc to use for the plane
  1387. *
  1388. * Changing the assigned crtc for a plane requires us to grab the lock and state
  1389. * for the new crtc, as needed. This function takes care of all these details
  1390. * besides updating the pointer in the state object itself.
  1391. *
  1392. * Returns:
  1393. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1394. * then the w/w mutex code has detected a deadlock and the entire atomic
  1395. * sequence must be restarted. All other errors are fatal.
  1396. */
  1397. int
  1398. drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
  1399. struct drm_crtc *crtc)
  1400. {
  1401. struct drm_plane *plane = plane_state->plane;
  1402. struct drm_crtc_state *crtc_state;
  1403. /* Nothing to do for same crtc*/
  1404. if (plane_state->crtc == crtc)
  1405. return 0;
  1406. if (plane_state->crtc) {
  1407. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1408. plane_state->crtc);
  1409. if (WARN_ON(IS_ERR(crtc_state)))
  1410. return PTR_ERR(crtc_state);
  1411. crtc_state->plane_mask &= ~drm_plane_mask(plane);
  1412. }
  1413. plane_state->crtc = crtc;
  1414. if (crtc) {
  1415. crtc_state = drm_atomic_get_crtc_state(plane_state->state,
  1416. crtc);
  1417. if (IS_ERR(crtc_state))
  1418. return PTR_ERR(crtc_state);
  1419. crtc_state->plane_mask |= drm_plane_mask(plane);
  1420. }
  1421. if (crtc)
  1422. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
  1423. plane->base.id, plane->name, plane_state,
  1424. crtc->base.id, crtc->name);
  1425. else
  1426. DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
  1427. plane->base.id, plane->name, plane_state);
  1428. return 0;
  1429. }
  1430. EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
  1431. /**
  1432. * drm_atomic_set_fb_for_plane - set framebuffer for plane
  1433. * @plane_state: atomic state object for the plane
  1434. * @fb: fb to use for the plane
  1435. *
  1436. * Changing the assigned framebuffer for a plane requires us to grab a reference
  1437. * to the new fb and drop the reference to the old fb, if there is one. This
  1438. * function takes care of all these details besides updating the pointer in the
  1439. * state object itself.
  1440. */
  1441. void
  1442. drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
  1443. struct drm_framebuffer *fb)
  1444. {
  1445. struct drm_plane *plane = plane_state->plane;
  1446. if (fb)
  1447. DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
  1448. fb->base.id, plane->base.id, plane->name,
  1449. plane_state);
  1450. else
  1451. DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
  1452. plane->base.id, plane->name, plane_state);
  1453. drm_framebuffer_assign(&plane_state->fb, fb);
  1454. }
  1455. EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
  1456. /**
  1457. * drm_atomic_set_fence_for_plane - set fence for plane
  1458. * @plane_state: atomic state object for the plane
  1459. * @fence: dma_fence to use for the plane
  1460. *
  1461. * Helper to setup the plane_state fence in case it is not set yet.
  1462. * By using this drivers doesn't need to worry if the user choose
  1463. * implicit or explicit fencing.
  1464. *
  1465. * This function will not set the fence to the state if it was set
  1466. * via explicit fencing interfaces on the atomic ioctl. In that case it will
  1467. * drop the reference to the fence as we are not storing it anywhere.
  1468. * Otherwise, if &drm_plane_state.fence is not set this function we just set it
  1469. * with the received implicit fence. In both cases this function consumes a
  1470. * reference for @fence.
  1471. *
  1472. * This way explicit fencing can be used to overrule implicit fencing, which is
  1473. * important to make explicit fencing use-cases work: One example is using one
  1474. * buffer for 2 screens with different refresh rates. Implicit fencing will
  1475. * clamp rendering to the refresh rate of the slower screen, whereas explicit
  1476. * fence allows 2 independent render and display loops on a single buffer. If a
  1477. * driver allows obeys both implicit and explicit fences for plane updates, then
  1478. * it will break all the benefits of explicit fencing.
  1479. */
  1480. void
  1481. drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
  1482. struct dma_fence *fence)
  1483. {
  1484. if (plane_state->fence) {
  1485. dma_fence_put(fence);
  1486. return;
  1487. }
  1488. plane_state->fence = fence;
  1489. }
  1490. EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		/*
		 * NOTE(review): the _new_ state accessor is used without an
		 * error check here — this assumes the old CRTC's state was
		 * already added to the atomic state when the connector got
		 * attached to it; confirm against the attach path below.
		 */
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~drm_connector_mask(conn_state->connector);

		/* Drop the reference taken when the connector was attached. */
		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		/* May acquire the new CRTC's lock, hence the -EDEADLK case. */
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			drm_connector_mask(conn_state->connector);

		/* Pin the connector while it is linked to a CRTC. */
		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
				 connector->base.id, connector->name,
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
				 connector->base.id, connector->name,
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
  1540. /*
  1541. * drm_atomic_get_writeback_job - return or allocate a writeback job
  1542. * @conn_state: Connector state to get the job for
  1543. *
  1544. * Writeback jobs have a different lifetime to the atomic state they are
  1545. * associated with. This convenience function takes care of allocating a job
  1546. * if there isn't yet one associated with the connector state, otherwise
  1547. * it just returns the existing job.
  1548. *
  1549. * Returns: The writeback job for the given connector state
  1550. */
  1551. static struct drm_writeback_job *
  1552. drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
  1553. {
  1554. WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
  1555. if (!conn_state->writeback_job)
  1556. conn_state->writeback_job =
  1557. kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
  1558. return conn_state->writeback_job;
  1559. }
  1560. /**
  1561. * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
  1562. * @conn_state: atomic state object for the connector
  1563. * @fb: fb to use for the connector
  1564. *
  1565. * This is used to set the framebuffer for a writeback connector, which outputs
  1566. * to a buffer instead of an actual physical connector.
  1567. * Changing the assigned framebuffer requires us to grab a reference to the new
  1568. * fb and drop the reference to the old fb, if there is one. This function
  1569. * takes care of all these details besides updating the pointer in the
  1570. * state object itself.
  1571. *
  1572. * Note: The only way conn_state can already have an fb set is if the commit
  1573. * sets the property more than once.
  1574. *
  1575. * See also: drm_writeback_connector_init()
  1576. *
  1577. * Returns: 0 on success
  1578. */
  1579. int drm_atomic_set_writeback_fb_for_connector(
  1580. struct drm_connector_state *conn_state,
  1581. struct drm_framebuffer *fb)
  1582. {
  1583. struct drm_writeback_job *job =
  1584. drm_atomic_get_writeback_job(conn_state);
  1585. if (!job)
  1586. return -ENOMEM;
  1587. drm_framebuffer_assign(&job->fb, fb);
  1588. if (fb)
  1589. DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
  1590. fb->base.id, conn_state);
  1591. else
  1592. DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
  1593. conn_state);
  1594. return 0;
  1595. }
  1596. EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Make sure the CRTC's state (and thus its lock) is part of @state. */
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		/* May fail with -EDEADLK; the iterator must be ended first. */
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  1650. /**
  1651. * drm_atomic_add_affected_planes - add planes for crtc
  1652. * @state: atomic state
  1653. * @crtc: DRM crtc
  1654. *
  1655. * This function walks the current configuration and adds all planes
  1656. * currently used by @crtc to the atomic configuration @state. This is useful
  1657. * when an atomic commit also needs to check all currently enabled plane on
  1658. * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
  1659. * to avoid special code to force-enable all planes.
  1660. *
  1661. * Since acquiring a plane state will always also acquire the w/w mutex of the
  1662. * current CRTC for that plane (if there is any) adding all the plane states for
  1663. * a CRTC will not reduce parallism of atomic updates.
  1664. *
  1665. * Returns:
  1666. * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
  1667. * then the w/w mutex code has detected a deadlock and the entire atomic
  1668. * sequence must be restarted. All other errors are fatal.
  1669. */
  1670. int
  1671. drm_atomic_add_affected_planes(struct drm_atomic_state *state,
  1672. struct drm_crtc *crtc)
  1673. {
  1674. struct drm_plane *plane;
  1675. WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
  1676. DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
  1677. crtc->base.id, crtc->name, state);
  1678. drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
  1679. struct drm_plane_state *plane_state =
  1680. drm_atomic_get_plane_state(state, plane);
  1681. if (IS_ERR(plane_state))
  1682. return PTR_ERR(plane_state);
  1683. }
  1684. return 0;
  1685. }
  1686. EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	/* Core checks first: planes, then CRTCs, then connectors. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	/* Driver-specific checks run only after the core checks passed. */
	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);
		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	/*
	 * The modeset ban is enforced last — NOTE(review): presumably because
	 * the driver's atomic_check above may itself flag a CRTC as needing a
	 * modeset; confirm against drm_atomic_crtc_needs_modeset() semantics.
	 */
	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
  1754. /**
  1755. * drm_atomic_commit - commit configuration atomically
  1756. * @state: atomic configuration to check
  1757. *
  1758. * Note that this function can return -EDEADLK if the driver needed to acquire
  1759. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1760. * backoff dance and restart. All other errors are fatal.
  1761. *
  1762. * This function will take its own reference on @state.
  1763. * Callers should always release their reference with drm_atomic_state_put().
  1764. *
  1765. * Returns:
  1766. * 0 on success, negative error code on failure.
  1767. */
  1768. int drm_atomic_commit(struct drm_atomic_state *state)
  1769. {
  1770. struct drm_mode_config *config = &state->dev->mode_config;
  1771. int ret;
  1772. ret = drm_atomic_check_only(state);
  1773. if (ret)
  1774. return ret;
  1775. DRM_DEBUG_ATOMIC("committing %p\n", state);
  1776. return config->funcs->atomic_commit(state->dev, state, false);
  1777. }
  1778. EXPORT_SYMBOL(drm_atomic_commit);
  1779. /**
  1780. * drm_atomic_nonblocking_commit - atomic nonblocking commit
  1781. * @state: atomic configuration to check
  1782. *
  1783. * Note that this function can return -EDEADLK if the driver needed to acquire
  1784. * more locks but encountered a deadlock. The caller must then do the usual w/w
  1785. * backoff dance and restart. All other errors are fatal.
  1786. *
  1787. * This function will take its own reference on @state.
  1788. * Callers should always release their reference with drm_atomic_state_put().
  1789. *
  1790. * Returns:
  1791. * 0 on success, negative error code on failure.
  1792. */
  1793. int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
  1794. {
  1795. struct drm_mode_config *config = &state->dev->mode_config;
  1796. int ret;
  1797. ret = drm_atomic_check_only(state);
  1798. if (ret)
  1799. return ret;
  1800. DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
  1801. return config->funcs->atomic_commit(state->dev, state, true);
  1802. }
  1803. EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  1804. static void drm_atomic_print_state(const struct drm_atomic_state *state)
  1805. {
  1806. struct drm_printer p = drm_info_printer(state->dev->dev);
  1807. struct drm_plane *plane;
  1808. struct drm_plane_state *plane_state;
  1809. struct drm_crtc *crtc;
  1810. struct drm_crtc_state *crtc_state;
  1811. struct drm_connector *connector;
  1812. struct drm_connector_state *connector_state;
  1813. int i;
  1814. DRM_DEBUG_ATOMIC("checking %p\n", state);
  1815. for_each_new_plane_in_state(state, plane, plane_state, i)
  1816. drm_atomic_plane_print_state(&p, plane_state);
  1817. for_each_new_crtc_in_state(state, crtc, crtc_state, i)
  1818. drm_atomic_crtc_print_state(&p, crtc_state);
  1819. for_each_new_connector_in_state(state, connector, connector_state, i)
  1820. drm_atomic_connector_print_state(&p, connector_state);
  1821. }
/*
 * Print the committed state of every plane, CRTC and connector of @dev
 * through @p. With @take_locks the relevant per-object lock is taken
 * around each print (the debugfs path does this); without it the caller
 * must either hold the locks already or accept racy output.
 */
static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Non-atomic (legacy) drivers have no atomic state to dump. */
	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	/* All connectors share the single connection_mutex. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}
/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error irq's. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must drm_modeset_lock_all(), or if this is called
 * from error irq handler, it should not be enabled by default.
 * (Ie. if you are debugging errors you might not care that this
 * is racey. But calling this without all modeset locks held is
 * not inherently safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	/* take_locks == false: locking (or racing) is the caller's choice. */
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
/* debugfs "state" file: dump the whole device state, taking the locks. */
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

/* Register the atomic debugfs files for @minor. */
int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
			ARRAY_SIZE(drm_atomic_debugfs_list),
			minor->debugfs_root, minor);
}
#endif
  1895. /*
  1896. * The big monster ioctl
  1897. */
  1898. static struct drm_pending_vblank_event *create_vblank_event(
  1899. struct drm_crtc *crtc, uint64_t user_data)
  1900. {
  1901. struct drm_pending_vblank_event *e = NULL;
  1902. e = kzalloc(sizeof *e, GFP_KERNEL);
  1903. if (!e)
  1904. return NULL;
  1905. e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
  1906. e->event.base.length = sizeof(e->event);
  1907. e->event.vbl.crtc_id = crtc->base.id;
  1908. e->event.vbl.user_data = user_data;
  1909. return e;
  1910. }
/*
 * Legacy DPMS support implemented on top of atomic: set the connector's
 * DPMS mode and recompute the CRTC's active state from all connectors
 * attached to that CRTC. The DPMS property value is rolled back on error.
 */
int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	/* Anything that isn't ON is treated as OFF. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	/* Connector not bound to a CRTC: updating the property is enough. */
	crtc = connector->state->crtc;
	if (!crtc)
		goto out;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	/* The CRTC stays active as long as any of its connectors is ON. */
	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;

		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	/* Restore the previous DPMS value if anything went wrong. */
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}
/*
 * Dispatch a property write into @state to the matching per-object setter,
 * after validating @prop_value against the property's constraints. Any
 * reference taken during validation is dropped before returning.
 */
int drm_atomic_set_property(struct drm_atomic_state *state,
			    struct drm_mode_object *obj,
			    struct drm_property *prop,
			    uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	/* Validates the value and may take a ref on an object-valued prop. */
	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		/* May fail with -EDEADLK; caller handles the backoff dance. */
		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	/* Balance the reference taken by drm_property_change_valid_get(). */
	drm_property_change_valid_put(prop, ref);
	return ret;
}
  2007. /**
  2008. * DOC: explicit fencing properties
  2009. *
  2010. * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A Fence or a group of fences are transferred to/from
  2012. * userspace using Sync File fds and there are two DRM properties for that.
  2013. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
  2014. * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
  2015. *
  2016. * As a contrast, with implicit fencing the kernel keeps track of any
  2017. * ongoing rendering, and automatically ensures that the atomic update waits
  2018. * for any pending rendering to complete. For shared buffers represented with
  2019. * a &struct dma_buf this is tracked in &struct reservation_object.
  2020. * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  2021. * whereas explicit fencing is what Android wants.
  2022. *
 * "IN_FENCE_FD":
  2024. * Use this property to pass a fence that DRM should wait on before
  2025. * proceeding with the Atomic Commit request and show the framebuffer for
  2026. * the plane on the screen. The fence can be either a normal fence or a
  2027. * merged one, the sync_file framework will handle both cases and use a
  2028. * fence_array if a merged fence is received. Passing -1 here means no
  2029. * fences to wait on.
  2030. *
  2031. * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
  2032. * it will only check if the Sync File is a valid one.
  2033. *
  2034. * On the driver side the fence is stored on the @fence parameter of
  2035. * &struct drm_plane_state. Drivers which also support implicit fencing
  2036. * should set the implicit fence using drm_atomic_set_fence_for_plane(),
  2037. * to make sure there's consistent behaviour between drivers in precedence
  2038. * of implicit vs. explicit fencing.
  2039. *
 * "OUT_FENCE_PTR":
  2041. * Use this property to pass a file descriptor pointer to DRM. Once the
  2042. * Atomic Commit request call returns OUT_FENCE_PTR will be filled with
  2043. * the file descriptor number of a Sync File. This Sync File contains the
  2044. * CRTC fence that will be signaled when all framebuffers present on the
 * Atomic Commit request for that given CRTC are scanned out on the
  2046. * screen.
  2047. *
 * The Atomic Commit request fails if an invalid pointer is passed. If the
  2049. * Atomic Commit request fails for any other reason the out fence fd
  2050. * returned will be -1. On a Atomic Commit with the
  2051. * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
  2052. *
  2053. * Note that out-fences don't have a special interface to drivers and are
  2054. * internally represented by a &struct drm_pending_vblank_event in struct
  2055. * &drm_crtc_state, which is also used by the nonblocking atomic commit
  2056. * helpers and for the DRM event handling for existing userspace.
  2057. */
/*
 * Bookkeeping for one userspace-requested out-fence, built up by
 * prepare_signaling() and consumed by complete_signaling().
 */
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;	/* userspace address the fd is written to */
	struct sync_file *sync_file;	/* sync_file wrapping the out-fence */
	int fd;				/* reserved fd; installed only on commit success */
};
  2063. static int setup_out_fence(struct drm_out_fence_state *fence_state,
  2064. struct dma_fence *fence)
  2065. {
  2066. fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
  2067. if (fence_state->fd < 0)
  2068. return fence_state->fd;
  2069. if (put_user(fence_state->fd, fence_state->out_fence_ptr))
  2070. return -EFAULT;
  2071. fence_state->sync_file = sync_file_create(fence);
  2072. if (!fence_state->sync_file)
  2073. return -ENOMEM;
  2074. return 0;
  2075. }
/*
 * Set up everything needed to signal completion of this commit back to
 * userspace: a vblank event per CRTC when DRM_MODE_PAGE_FLIP_EVENT is
 * requested, an out-fence per CRTC with OUT_FENCE_PTR set, and an
 * out-fence per writeback connector with one requested.
 *
 * On success *fence_state/*num_fences describe the reserved fds and
 * sync_files. On failure partially created state is left in place; the
 * caller is expected to unwind it via complete_signaling().
 */
static int prepare_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, c = 0, ret;

	/* TEST_ONLY commits never deliver events or fences to userspace. */
	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		/*
		 * Both page-flip events and out-fences are delivered via a
		 * drm_pending_vblank_event, so allocate one if either was
		 * requested for this CRTC.
		 */
		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			/*
			 * Driver-internal commits have no drm_file to
			 * deliver the event to; skip the reservation.
			 */
			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			/* Grow the out-fence bookkeeping array by one. */
			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			/*
			 * Count the entry even on failure so cleanup sees
			 * the reserved fd (setup_out_fence records it).
			 */
			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			/* The event now owns this fence reference. */
			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		struct drm_writeback_connector *wb_conn;
		struct drm_writeback_job *job;
		struct drm_out_fence_state *f;
		struct dma_fence *fence;
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_connector(state, conn);
		if (!fence_ptr)
			continue;

		job = drm_atomic_get_writeback_job(conn_state);
		if (!job)
			return -ENOMEM;

		/* Same bookkeeping-array growth as the CRTC loop above. */
		f = krealloc(*fence_state, sizeof(**fence_state) *
			     (*num_fences + 1), GFP_KERNEL);
		if (!f)
			return -ENOMEM;

		memset(&f[*num_fences], 0, sizeof(*f));

		f[*num_fences].out_fence_ptr = fence_ptr;
		*fence_state = f;

		wb_conn = drm_connector_to_writeback(conn);
		fence = drm_writeback_get_out_fence(wb_conn);
		if (!fence)
			return -ENOMEM;

		ret = setup_out_fence(&f[(*num_fences)++], fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}

		/* The writeback job now owns this fence reference. */
		job->out_fence = fence;
	}

	/*
	 * Having this flag means user mode pends on event which will never
	 * reach due to lack of at least one CRTC for signaling
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}
/*
 * Finish or unwind the signaling state created by prepare_signaling().
 *
 * On success (@install_fds true) the reserved fds are installed, making
 * the sync_files visible to userspace. On failure the vblank events
 * allocated here are cancelled, sync_file references dropped, reserved
 * fds released and -1 written back to the userspace fence pointers.
 */
static void complete_signaling(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   struct drm_out_fence_state *fence_state,
				   unsigned int num_fences,
				   bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		/* fd_install() consumes the fd reservation. */
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails log error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}
/*
 * ioctl entry point for DRM_IOCTL_MODE_ATOMIC: build a drm_atomic_state
 * from the userspace-supplied object/property/value arrays, set up
 * event/out-fence signaling, then check or commit the state. On ww-mutex
 * deadlock (-EDEADLK) the state is cleared and the whole sequence is
 * retried after backing off.
 */
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_out_fence_state *fence_state;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	/* Reset per-attempt progress; re-run from scratch after -EDEADLK. */
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		/* Apply each (property, value) pair listed for this object. */
		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = drm_atomic_set_property(state, obj, prop,
						      prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		drm_mode_object_put(obj);
	}

	ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
				&num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	/* Install fds on success, or unwind events/fences on failure. */
	complete_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}