qxl_display.c

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/crc32.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static bool qxl_head_enabled(struct qxl_head *head)
{
	return head->width && head->height;
}
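
/*
 * Make sure qdev->client_monitors_config has room for 'count' heads,
 * reallocating the buffer if the existing one is too small.
 */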
static void qxl_alloc_client_monitors_config(struct qxl_device *qdev,
					     unsigned count)
{
	if (qdev->client_monitors_config &&
	    count > qdev->client_monitors_config->count) {
		kfree(qdev->client_monitors_config);
		qdev->client_monitors_config = NULL;
	}
	if (!qdev->client_monitors_config) {
		qdev->client_monitors_config = kzalloc(
				sizeof(struct qxl_monitors_config) +
				sizeof(struct qxl_head) * count, GFP_KERNEL);
		if (!qdev->client_monitors_config)
			return;
	}
	qdev->client_monitors_config->count = count;
}

enum {
	MONITORS_CONFIG_MODIFIED,
	MONITORS_CONFIG_UNCHANGED,
	MONITORS_CONFIG_BAD_CRC,
};
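
/*
 * Copy the client monitors config from the device ROM into
 * qdev->client_monitors_config, validating the CRC first.  Returns
 * MONITORS_CONFIG_BAD_CRC on a CRC mismatch, MONITORS_CONFIG_MODIFIED when
 * any head changed, and MONITORS_CONFIG_UNCHANGED otherwise.
 */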
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
{
	int i;
	int num_monitors;
	uint32_t crc;
	int status = MONITORS_CONFIG_UNCHANGED;

	num_monitors = qdev->rom->client_monitors_config.count;
	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
		    sizeof(qdev->rom->client_monitors_config));
	if (crc != qdev->rom->client_monitors_config_crc)
		return MONITORS_CONFIG_BAD_CRC;

	if (!num_monitors) {
		DRM_DEBUG_KMS("no client monitors configured\n");
		return status;
	}
	if (num_monitors > qdev->monitors_config->max_allowed) {
		DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n",
			      qdev->monitors_config->max_allowed, num_monitors);
		num_monitors = qdev->monitors_config->max_allowed;
	} else {
		num_monitors = qdev->rom->client_monitors_config.count;
	}
	if (qdev->client_monitors_config
	    && (num_monitors != qdev->client_monitors_config->count)) {
		status = MONITORS_CONFIG_MODIFIED;
	}
	qxl_alloc_client_monitors_config(qdev, num_monitors);
	/* we copy max from the client but it isn't used */
	qdev->client_monitors_config->max_allowed =
		qdev->monitors_config->max_allowed;
	for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
		struct qxl_urect *c_rect =
			&qdev->rom->client_monitors_config.heads[i];
		struct qxl_head *client_head =
			&qdev->client_monitors_config->heads[i];

		if (client_head->x != c_rect->left) {
			client_head->x = c_rect->left;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->y != c_rect->top) {
			client_head->y = c_rect->top;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->width != c_rect->right - c_rect->left) {
			client_head->width = c_rect->right - c_rect->left;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->height != c_rect->bottom - c_rect->top) {
			client_head->height = c_rect->bottom - c_rect->top;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->surface_id != 0) {
			client_head->surface_id = 0;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->id != i) {
			client_head->id = i;
			status = MONITORS_CONFIG_MODIFIED;
		}
		if (client_head->flags != 0) {
			client_head->flags = 0;
			status = MONITORS_CONFIG_MODIFIED;
		}
		DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width,
			      client_head->height,
			      client_head->x, client_head->y);
	}

	return status;
}

static void qxl_update_offset_props(struct qxl_device *qdev)
{
	struct drm_device *dev = &qdev->ddev;
	struct drm_connector *connector;
	struct qxl_output *output;
	struct qxl_head *head;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		output = drm_connector_to_qxl_output(connector);

		head = &qdev->client_monitors_config->heads[output->index];

		drm_object_property_set_value(&connector->base,
			dev->mode_config.suggested_x_property, head->x);
		drm_object_property_set_value(&connector->base,
			dev->mode_config.suggested_y_property, head->y);
	}
}
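
/*
 * Re-read the client monitors config from ROM (retrying on a bad CRC),
 * update the suggested x/y offset properties on the connectors and raise a
 * hotplug event so userspace can pick up the new configuration.
 */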
void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
{
	struct drm_device *dev = &qdev->ddev;
	int status, retries;

	for (retries = 0; retries < 10; retries++) {
		status = qxl_display_copy_rom_client_monitors_config(qdev);
		if (status != MONITORS_CONFIG_BAD_CRC)
			break;
		udelay(5);
	}
	if (status == MONITORS_CONFIG_BAD_CRC) {
		DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
		return;
	}
	if (status == MONITORS_CONFIG_UNCHANGED) {
		DRM_DEBUG_KMS("ignoring client monitors config: unchanged");
		return;
	}

	drm_modeset_lock_all(dev);
	qxl_update_offset_props(qdev);
	drm_modeset_unlock_all(dev);
	if (!drm_helper_hpd_irq_event(dev)) {
		/* notify that the monitor configuration changed, to
		   adjust at the arbitrary resolution */
		drm_kms_helper_hotplug_event(dev);
	}
}
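
/*
 * Add the mode requested by the client for this head (if any) as the
 * connector's preferred mode and remember it for later mode validation.
 */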
static int qxl_add_monitors_config_modes(struct drm_connector *connector,
					 unsigned *pwidth,
					 unsigned *pheight)
{
	struct drm_device *dev = connector->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_output *output = drm_connector_to_qxl_output(connector);
	int h = output->index;
	struct drm_display_mode *mode = NULL;
	struct qxl_head *head;

	if (!qdev->monitors_config)
		return 0;
	if (h >= qdev->monitors_config->max_allowed)
		return 0;
	if (!qdev->client_monitors_config)
		return 0;
	if (h >= qdev->client_monitors_config->count)
		return 0;

	head = &qdev->client_monitors_config->heads[h];
	DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);

	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
			    false);
	mode->type |= DRM_MODE_TYPE_PREFERRED;
	mode->hdisplay = head->width;
	mode->vdisplay = head->height;
	drm_mode_set_name(mode);
	*pwidth = head->width;
	*pheight = head->height;
	drm_mode_probed_add(connector, mode);
	/* remember the last custom size for mode validation */
	qdev->monitors_config_width = mode->hdisplay;
	qdev->monitors_config_height = mode->vdisplay;
	return 1;
}

static struct mode_size {
	int w;
	int h;
} common_modes[] = {
	{ 640,  480},
	{ 720,  480},
	{ 800,  600},
	{ 848,  480},
	{1024,  768},
	{1152,  768},
	{1280,  720},
	{1280,  800},
	{1280,  854},
	{1280,  960},
	{1280, 1024},
	{1440,  900},
	{1400, 1050},
	{1680, 1050},
	{1600, 1200},
	{1920, 1080},
	{1920, 1200}
};

static int qxl_add_common_modes(struct drm_connector *connector,
				unsigned pwidth,
				unsigned pheight)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
				    60, false, false, false);
		if (common_modes[i].w == pwidth && common_modes[i].h == pheight)
			mode->type |= DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);
	}
	return i - 1;
}
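
/*
 * Push qdev->monitors_config to the device, refusing to send obviously
 * bogus head geometry (anything larger than 8192 in either dimension).
 */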
static void qxl_send_monitors_config(struct qxl_device *qdev)
{
	int i;

	BUG_ON(!qdev->ram_header->monitors_config);

	if (qdev->monitors_config->count == 0)
		return;

	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
		struct qxl_head *head = &qdev->monitors_config->heads[i];

		if (head->y > 8192 || head->x > 8192 ||
		    head->width > 8192 || head->height > 8192) {
			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
				  i, head->width, head->height,
				  head->x, head->y);
			return;
		}
	}
	qxl_io_monitors_config(qdev);
}
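
/*
 * Update the head belonging to this crtc in qdev->monitors_config (size,
 * position, active head count) and send the new config to the device if
 * anything actually changed.
 */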
static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
					    const char *reason)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct qxl_head head;
	int oldcount, i = qcrtc->index;

	if (!qdev->primary_created) {
		DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason);
		return;
	}

	if (!qdev->monitors_config ||
	    qdev->monitors_config->max_allowed <= i)
		return;

	head.id = i;
	head.flags = 0;
	oldcount = qdev->monitors_config->count;
	if (crtc->state->active) {
		struct drm_display_mode *mode = &crtc->mode;

		head.width = mode->hdisplay;
		head.height = mode->vdisplay;
		head.x = crtc->x;
		head.y = crtc->y;
		if (qdev->monitors_config->count < i + 1)
			qdev->monitors_config->count = i + 1;
	} else if (i > 0) {
		head.width = 0;
		head.height = 0;
		head.x = 0;
		head.y = 0;
		if (qdev->monitors_config->count == i + 1)
			qdev->monitors_config->count = i;
	} else {
		DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason);
		return;
	}

	if (head.width == qdev->monitors_config->heads[i].width &&
	    head.height == qdev->monitors_config->heads[i].height &&
	    head.x == qdev->monitors_config->heads[i].x &&
	    head.y == qdev->monitors_config->heads[i].y &&
	    oldcount == qdev->monitors_config->count)
		return;

	DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n",
		      i, head.width, head.height, head.x, head.y,
		      crtc->state->active ? "on" : "off", reason);
	if (oldcount != qdev->monitors_config->count)
		DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n",
			      oldcount, qdev->monitors_config->count,
			      qdev->monitors_config->max_allowed);

	qdev->monitors_config->heads[i] = head;
	qxl_send_monitors_config(qdev);
}

static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	if (crtc->state && crtc->state->event) {
		event = crtc->state->event;
		crtc->state->event = NULL;

		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	qxl_crtc_update_monitors_config(crtc, "flush");
}

static void qxl_crtc_destroy(struct drm_crtc *crtc)
{
	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);

	qxl_bo_unref(&qxl_crtc->cursor_bo);
	drm_crtc_cleanup(crtc);
	kfree(qxl_crtc);
}

static const struct drm_crtc_funcs qxl_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = qxl_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
	struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);

	WARN_ON(bo->shadow);
	drm_gem_object_put_unlocked(qxl_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(qxl_fb);
}

static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	/* TODO: vmwgfx where this was cribbed from had locking. Why? */
	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
	struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
	struct drm_clip_rect norect;
	struct qxl_bo *qobj;
	int inc = 1;

	drm_modeset_lock_all(fb->dev);

	qobj = gem_to_qxl_bo(qxl_fb->obj);
	/* if we aren't primary surface ignore this */
	if (!qobj->is_primary) {
		drm_modeset_unlock_all(fb->dev);
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = fb->width;
		norect.y2 = fb->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
			  clips, num_clips, inc);

	drm_modeset_unlock_all(fb->dev);

	return 0;
}

static const struct drm_framebuffer_funcs qxl_fb_funcs = {
	.destroy = qxl_user_framebuffer_destroy,
	.dirty = qxl_framebuffer_surface_dirty,
	/* TODO?
	 * .create_handle = qxl_user_framebuffer_create_handle, */
};

int
qxl_framebuffer_init(struct drm_device *dev,
		     struct qxl_framebuffer *qfb,
		     const struct drm_mode_fb_cmd2 *mode_cmd,
		     struct drm_gem_object *obj,
		     const struct drm_framebuffer_funcs *funcs)
{
	int ret;

	qfb->obj = obj;
	drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &qfb->base, funcs);
	if (ret) {
		qfb->obj = NULL;
		return ret;
	}
	return 0;
}

static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	qxl_crtc_update_monitors_config(crtc, "enable");
}

static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	qxl_crtc_update_monitors_config(crtc, "disable");
}

static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
	.atomic_flush = qxl_crtc_atomic_flush,
	.atomic_enable = qxl_crtc_atomic_enable,
	.atomic_disable = qxl_crtc_atomic_disable,
};
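
/*
 * Reject a primary plane configuration whose framebuffer does not fit in
 * the device's VRAM.
 */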
static int qxl_primary_atomic_check(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct qxl_framebuffer *qfb;
	struct qxl_bo *bo;

	if (!state->crtc || !state->fb)
		return 0;

	qfb = to_qxl_framebuffer(state->fb);
	bo = gem_to_qxl_bo(qfb->obj);

	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
		DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
		return -EINVAL;
	}

	return 0;
}
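
/*
 * Re-issue a QXL_CURSOR_SET command for the cursor bo already attached to
 * this crtc, so the cursor survives re-creation of the primary surface.
 */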
static int qxl_primary_apply_cursor(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct drm_framebuffer *fb = plane->state->fb;
	struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
	struct qxl_cursor_cmd *cmd;
	struct qxl_release *release;
	int ret = 0;

	if (!qcrtc->cursor_bo)
		return 0;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		return ret;

	ret = qxl_release_list_add(release, qcrtc->cursor_bo);
	if (ret)
		goto out_free_release;

	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_release;

	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_SET;
	cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
	cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;

	cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);

	cmd->u.set.visible = 1;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

	return ret;

out_free_release:
	qxl_release_free(qdev, release);
	return ret;
}
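
/*
 * Switch the primary surface to the new framebuffer: destroy the old
 * primary (unless both framebuffers share the same shadow bo), create the
 * new one and mark the whole framebuffer dirty so it gets redrawn.
 */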
static void qxl_primary_atomic_update(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct qxl_framebuffer *qfb =
		to_qxl_framebuffer(plane->state->fb);
	struct qxl_framebuffer *qfb_old;
	struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
	struct qxl_bo *bo_old;
	struct drm_clip_rect norect = {
	    .x1 = 0,
	    .y1 = 0,
	    .x2 = qfb->base.width,
	    .y2 = qfb->base.height
	};
	int ret;
	bool same_shadow = false;

	if (old_state->fb) {
		qfb_old = to_qxl_framebuffer(old_state->fb);
		bo_old = gem_to_qxl_bo(qfb_old->obj);
	} else {
		bo_old = NULL;
	}

	if (bo == bo_old)
		return;

	if (bo_old && bo_old->shadow && bo->shadow &&
	    bo_old->shadow == bo->shadow) {
		same_shadow = true;
	}

	if (bo_old && bo_old->is_primary) {
		if (!same_shadow)
			qxl_io_destroy_primary(qdev);
		bo_old->is_primary = false;

		ret = qxl_primary_apply_cursor(plane);
		if (ret)
			DRM_ERROR(
			"could not set cursor after creating primary");
	}

	if (!bo->is_primary) {
		if (!same_shadow)
			qxl_io_create_primary(qdev, 0, bo);
		bo->is_primary = true;
	}

	qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}

static void qxl_primary_atomic_disable(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;

	if (old_state->fb) {
		struct qxl_framebuffer *qfb =
			to_qxl_framebuffer(old_state->fb);
		struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);

		if (bo->is_primary) {
			qxl_io_destroy_primary(qdev);
			bo->is_primary = false;
		}
	}
}
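
/*
 * Update the hardware cursor.  When the framebuffer changed, copy the
 * 64x64 ARGB cursor image into a freshly allocated cursor bo and send a
 * QXL_CURSOR_SET command; otherwise send a QXL_CURSOR_MOVE with the new
 * position only.
 */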
static void qxl_cursor_atomic_update(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct drm_framebuffer *fb = plane->state->fb;
	struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
	struct qxl_release *release;
	struct qxl_cursor_cmd *cmd;
	struct qxl_cursor *cursor;
	struct drm_gem_object *obj;
	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
	int ret;
	void *user_ptr;
	int size = 64*64*4;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		return;

	if (fb != old_state->fb) {
		obj = to_qxl_framebuffer(fb)->obj;
		user_bo = gem_to_qxl_bo(obj);

		/* pinning is done in the prepare/cleanup framebuffer hooks */
		ret = qxl_bo_kmap(user_bo, &user_ptr);
		if (ret)
			goto out_free_release;

		ret = qxl_alloc_bo_reserved(qdev, release,
					    sizeof(struct qxl_cursor) + size,
					    &cursor_bo);
		if (ret)
			goto out_kunmap;

		ret = qxl_release_reserve_list(release, true);
		if (ret)
			goto out_free_bo;

		ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
		if (ret)
			goto out_backoff;

		cursor->header.unique = 0;
		cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
		cursor->header.width = 64;
		cursor->header.height = 64;
		cursor->header.hot_spot_x = fb->hot_x;
		cursor->header.hot_spot_y = fb->hot_y;
		cursor->data_size = size;
		cursor->chunk.next_chunk = 0;
		cursor->chunk.prev_chunk = 0;
		cursor->chunk.data_size = size;
		memcpy(cursor->chunk.data, user_ptr, size);
		qxl_bo_kunmap(cursor_bo);
		qxl_bo_kunmap(user_bo);

		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
		cmd->u.set.visible = 1;
		cmd->u.set.shape = qxl_bo_physical_address(qdev,
							   cursor_bo, 0);
		cmd->type = QXL_CURSOR_SET;

		old_cursor_bo = qcrtc->cursor_bo;
		qcrtc->cursor_bo = cursor_bo;
		cursor_bo = NULL;
	} else {
		ret = qxl_release_reserve_list(release, true);
		if (ret)
			goto out_free_release;

		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
		cmd->type = QXL_CURSOR_MOVE;
	}

	cmd->u.position.x = plane->state->crtc_x + fb->hot_x;
	cmd->u.position.y = plane->state->crtc_y + fb->hot_y;

	qxl_release_unmap(qdev, release, &cmd->release_info);
	qxl_release_fence_buffer_objects(release);
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

	if (old_cursor_bo)
		qxl_bo_unref(&old_cursor_bo);

	qxl_bo_unref(&cursor_bo);

	return;

out_backoff:
	qxl_release_backoff_reserve_list(release);
out_free_bo:
	qxl_bo_unref(&cursor_bo);
out_kunmap:
	qxl_bo_kunmap(user_bo);
out_free_release:
	qxl_release_free(qdev, release);
	return;
}

static void qxl_cursor_atomic_disable(struct drm_plane *plane,
				      struct drm_plane_state *old_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct qxl_release *release;
	struct qxl_cursor_cmd *cmd;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		return;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return;
	}

	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_HIDE;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
}
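
/*
 * Pin the framebuffer bo before it is scanned out.  For dumb primary
 * framebuffers also set up a shadow bo, reusing the previous shadow when
 * the plane configuration and buffer size are unchanged.
 */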
static int qxl_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	struct qxl_device *qdev = plane->dev->dev_private;
	struct drm_gem_object *obj;
	struct qxl_bo *user_bo, *old_bo = NULL;
	int ret;

	if (!new_state->fb)
		return 0;

	obj = to_qxl_framebuffer(new_state->fb)->obj;
	user_bo = gem_to_qxl_bo(obj);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    user_bo->is_dumb && !user_bo->shadow) {
		if (plane->state->fb) {
			obj = to_qxl_framebuffer(plane->state->fb)->obj;
			old_bo = gem_to_qxl_bo(obj);
		}
		if (old_bo && old_bo->shadow &&
		    user_bo->gem_base.size == old_bo->gem_base.size &&
		    plane->state->crtc == new_state->crtc &&
		    plane->state->crtc_w == new_state->crtc_w &&
		    plane->state->crtc_h == new_state->crtc_h &&
		    plane->state->src_x == new_state->src_x &&
		    plane->state->src_y == new_state->src_y &&
		    plane->state->src_w == new_state->src_w &&
		    plane->state->src_h == new_state->src_h &&
		    plane->state->rotation == new_state->rotation &&
		    plane->state->zpos == new_state->zpos) {
			drm_gem_object_get(&old_bo->shadow->gem_base);
			user_bo->shadow = old_bo->shadow;
		} else {
			qxl_bo_create(qdev, user_bo->gem_base.size,
				      true, true, QXL_GEM_DOMAIN_VRAM, NULL,
				      &user_bo->shadow);
		}
	}

	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
	if (ret)
		return ret;

	return 0;
}

static void qxl_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct drm_gem_object *obj;
	struct qxl_bo *user_bo;

	if (!old_state->fb) {
		/*
		 * we never executed prepare_fb, so there's nothing to
		 * unpin.
		 */
		return;
	}

	obj = to_qxl_framebuffer(old_state->fb)->obj;
	user_bo = gem_to_qxl_bo(obj);
	qxl_bo_unpin(user_bo);

	if (user_bo->shadow && !user_bo->is_primary) {
		drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
		user_bo->shadow = NULL;
	}
}

static const uint32_t qxl_cursor_plane_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
	.atomic_update = qxl_cursor_atomic_update,
	.atomic_disable = qxl_cursor_atomic_disable,
	.prepare_fb = qxl_plane_prepare_fb,
	.cleanup_fb = qxl_plane_cleanup_fb,
};

static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static const uint32_t qxl_primary_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const struct drm_plane_helper_funcs primary_helper_funcs = {
	.atomic_check = qxl_primary_atomic_check,
	.atomic_update = qxl_primary_atomic_update,
	.atomic_disable = qxl_primary_atomic_disable,
	.prepare_fb = qxl_plane_prepare_fb,
	.cleanup_fb = qxl_plane_cleanup_fb,
};

static const struct drm_plane_funcs qxl_primary_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_primary_helper_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
					  unsigned int possible_crtcs,
					  enum drm_plane_type type)
{
	const struct drm_plane_helper_funcs *helper_funcs = NULL;
	struct drm_plane *plane;
	const struct drm_plane_funcs *funcs;
	const uint32_t *formats;
	int num_formats;
	int err;

	if (type == DRM_PLANE_TYPE_PRIMARY) {
		funcs = &qxl_primary_plane_funcs;
		formats = qxl_primary_plane_formats;
		num_formats = ARRAY_SIZE(qxl_primary_plane_formats);
		helper_funcs = &primary_helper_funcs;
	} else if (type == DRM_PLANE_TYPE_CURSOR) {
		funcs = &qxl_cursor_plane_funcs;
		formats = qxl_cursor_plane_formats;
		helper_funcs = &qxl_cursor_helper_funcs;
		num_formats = ARRAY_SIZE(qxl_cursor_plane_formats);
	} else {
		return ERR_PTR(-EINVAL);
	}

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
				       funcs, formats, num_formats,
				       NULL, type, NULL);
	if (err)
		goto free_plane;

	drm_plane_helper_add(plane, helper_funcs);

	return plane;

free_plane:
	kfree(plane);
	return ERR_PTR(-EINVAL);
}

static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
{
	struct qxl_crtc *qxl_crtc;
	struct drm_plane *primary, *cursor;
	struct qxl_device *qdev = dev->dev_private;
	int r;

	qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
	if (!qxl_crtc)
		return -ENOMEM;

	primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary)) {
		r = -ENOMEM;
		goto free_mem;
	}

	cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR);
	if (IS_ERR(cursor)) {
		r = -ENOMEM;
		goto clean_primary;
	}

	r = drm_crtc_init_with_planes(dev, &qxl_crtc->base, primary, cursor,
				      &qxl_crtc_funcs, NULL);
	if (r)
		goto clean_cursor;

	qxl_crtc->index = crtc_id;
	drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
	return 0;

clean_cursor:
	drm_plane_cleanup(cursor);
	kfree(cursor);
clean_primary:
	drm_plane_cleanup(primary);
	kfree(primary);
free_mem:
	kfree(qxl_crtc);
	return r;
}

static int qxl_conn_get_modes(struct drm_connector *connector)
{
	unsigned pwidth = 1024;
	unsigned pheight = 768;
	int ret = 0;

	ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
	if (ret < 0)
		return ret;
	ret += qxl_add_common_modes(connector, pwidth, pheight);
	return ret;
}

static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
						struct drm_display_mode *mode)
{
	struct drm_device *ddev = connector->dev;
	struct qxl_device *qdev = ddev->dev_private;
	int i;

	/* TODO: is this called for user defined modes? (xrandr --add-mode)
	 * TODO: check that the mode fits in the framebuffer */

	if (qdev->monitors_config_width == mode->hdisplay &&
	    qdev->monitors_config_height == mode->vdisplay)
		return MODE_OK;

	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		if (common_modes[i].w == mode->hdisplay &&
		    common_modes[i].h == mode->vdisplay)
			return MODE_OK;
	}
	return MODE_BAD;
}

static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
{
	struct qxl_output *qxl_output =
		drm_connector_to_qxl_output(connector);

	DRM_DEBUG("\n");
	return &qxl_output->enc;
}

static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
};

static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
	.get_modes = qxl_conn_get_modes,
	.mode_valid = qxl_conn_mode_valid,
	.best_encoder = qxl_best_encoder,
};

static enum drm_connector_status qxl_conn_detect(
			struct drm_connector *connector,
			bool force)
{
	struct qxl_output *output =
		drm_connector_to_qxl_output(connector);
	struct drm_device *ddev = connector->dev;
	struct qxl_device *qdev = ddev->dev_private;
	bool connected = false;

	/* The first monitor is always connected */
	if (!qdev->client_monitors_config) {
		if (output->index == 0)
			connected = true;
	} else
		connected = qdev->client_monitors_config->count > output->index &&
		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);

	DRM_DEBUG("#%d connected: %d\n", output->index, connected);

	return connected ? connector_status_connected
			 : connector_status_disconnected;
}

static void qxl_conn_destroy(struct drm_connector *connector)
{
	struct qxl_output *qxl_output =
		drm_connector_to_qxl_output(connector);

	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(qxl_output);
}

static const struct drm_connector_funcs qxl_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = qxl_conn_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = qxl_conn_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static void qxl_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs qxl_enc_funcs = {
	.destroy = qxl_enc_destroy,
};

static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
	if (qdev->hotplug_mode_update_property)
		return 0;

	qdev->hotplug_mode_update_property =
		drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	return 0;
}
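
/*
 * Create the virtual connector/encoder pair for head 'num_output'.  The
 * connector is polled via HPD (raised from the client monitors config) and
 * carries the hotplug_mode_update and suggested x/y properties.
 */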
static int qdev_output_init(struct drm_device *dev, int num_output)
{
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_output *qxl_output;
	struct drm_connector *connector;
	struct drm_encoder *encoder;

	qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
	if (!qxl_output)
		return -ENOMEM;

	qxl_output->index = num_output;

	connector = &qxl_output->base;
	encoder = &qxl_output->enc;
	drm_connector_init(dev, &qxl_output->base,
			   &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);

	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);

	/* we get HPD via client monitors config */
	connector->polled = DRM_CONNECTOR_POLL_HPD;
	encoder->possible_crtcs = 1 << num_output;
	drm_connector_attach_encoder(&qxl_output->base,
				     &qxl_output->enc);
	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);

	drm_object_attach_property(&connector->base,
				   qdev->hotplug_mode_update_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	return 0;
}

static struct drm_framebuffer *
qxl_user_framebuffer_create(struct drm_device *dev,
			    struct drm_file *file_priv,
			    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct qxl_framebuffer *qxl_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (!obj)
		return NULL;

	qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
	if (qxl_fb == NULL)
		return NULL;

	ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
	if (ret) {
		kfree(qxl_fb);
		drm_gem_object_put_unlocked(obj);
		return NULL;
	}

	return &qxl_fb->base;
}

static const struct drm_mode_config_funcs qxl_mode_funcs = {
	.fb_create = qxl_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
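
/*
 * Allocate, pin and map the bo backing qdev->monitors_config and tell the
 * device where to find it via the ram header.
 */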
int qxl_create_monitors_object(struct qxl_device *qdev)
{
	int ret;
	struct drm_gem_object *gobj;
	int max_allowed = qxl_num_crtc;
	int monitors_config_size = sizeof(struct qxl_monitors_config) +
		max_allowed * sizeof(struct qxl_head);

	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
				    QXL_GEM_DOMAIN_VRAM,
				    false, false, NULL, &gobj);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
		return -ENOMEM;
	}
	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);

	ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
	if (ret)
		return ret;

	qxl_bo_kmap(qdev->monitors_config_bo, NULL);

	qdev->monitors_config = qdev->monitors_config_bo->kptr;
	qdev->ram_header->monitors_config =
		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);

	memset(qdev->monitors_config, 0, monitors_config_size);
	qdev->monitors_config->max_allowed = max_allowed;
	return 0;
}

int qxl_destroy_monitors_object(struct qxl_device *qdev)
{
	int ret;

	qdev->monitors_config = NULL;
	qdev->ram_header->monitors_config = 0;

	qxl_bo_kunmap(qdev->monitors_config_bo);
	ret = qxl_bo_unpin(qdev->monitors_config_bo);
	if (ret)
		return ret;

	qxl_bo_unref(&qdev->monitors_config_bo);
	return 0;
}
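
/*
 * Set up KMS for the device: mode_config limits, the monitors config
 * object, one crtc and output per head, and fbdev emulation.
 */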
int qxl_modeset_init(struct qxl_device *qdev)
{
	int i;
	int ret;

	drm_mode_config_init(&qdev->ddev);
	ret = qxl_create_monitors_object(qdev);
	if (ret)
		return ret;

	qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;

	/* modes will be validated against the framebuffer size */
	qdev->ddev.mode_config.min_width = 0;
	qdev->ddev.mode_config.min_height = 0;
	qdev->ddev.mode_config.max_width = 8192;
	qdev->ddev.mode_config.max_height = 8192;

	qdev->ddev.mode_config.fb_base = qdev->vram_base;

	drm_mode_create_suggested_offset_properties(&qdev->ddev);
	qxl_mode_create_hotplug_mode_update_property(qdev);

	for (i = 0 ; i < qxl_num_crtc; ++i) {
		qdev_crtc_init(&qdev->ddev, i);
		qdev_output_init(&qdev->ddev, i);
	}

	qxl_display_read_client_monitors_config(qdev);
	qdev->mode_info.mode_config_initialized = true;

	drm_mode_config_reset(&qdev->ddev);

	/* primary surface must be created by this point, to allow
	 * issuing command queue commands and having them read by
	 * spice server. */
	qxl_fbdev_init(qdev);
	return 0;
}

void qxl_modeset_fini(struct qxl_device *qdev)
{
	qxl_fbdev_fini(qdev);

	qxl_destroy_monitors_object(qdev);

	if (qdev->mode_info.mode_config_initialized) {
		drm_mode_config_cleanup(&qdev->ddev);
		qdev->mode_info.mode_config_initialized = false;
	}
}