gma_display.c

/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>

#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
        struct drm_device *dev = crtc->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *l_entry;

        list_for_each_entry(l_entry, &mode_config->connector_list, head) {
                if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
                        struct gma_encoder *gma_encoder =
                                                gma_attached_encoder(l_entry);
                        if (gma_encoder->type == type)
                                return true;
                }
        }

        return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
        /* Wait for 20ms, i.e. one cycle at 50Hz. */
        mdelay(20);
}

int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                      struct drm_framebuffer *old_fb)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct gtt_range *gtt;
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
        u32 dspcntr;
        int ret = 0;

        if (!gma_power_begin(dev, true))
                return 0;

        /* no fb bound */
        if (!fb) {
                dev_err(dev->dev, "No FB bound\n");
                goto gma_pipe_cleaner;
        }

        gtt = to_gtt_range(fb->obj[0]);

        /* We are displaying this buffer, make sure it is actually loaded
           into the GTT */
        ret = psb_gtt_pin(gtt);
        if (ret < 0)
                goto gma_pipe_set_base_exit;
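
        /* start is the buffer's base address in the GTT aperture; offset is
         * the byte offset of pixel (x, y) within that buffer. */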
        start = gtt->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];

        REG_WRITE(map->stride, fb->pitches[0]);

        dspcntr = REG_READ(map->cntr);
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

        switch (fb->format->cpp[0] * 8) {
        case 8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case 16:
                if (fb->format->depth == 15)
                        dspcntr |= DISPPLANE_15_16BPP;
                else
                        dspcntr |= DISPPLANE_16BPP;
                break;
        case 24:
        case 32:
                dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
                break;
        default:
                dev_err(dev->dev, "Unknown color depth\n");
                ret = -EINVAL;
                goto gma_pipe_set_base_exit;
        }
        REG_WRITE(map->cntr, dspcntr);

        dev_dbg(dev->dev,
                "Writing base %08lX %08lX %d %d\n", start, offset, x, y);

        /* FIXME: Investigate whether this really is the base for psb and why
                  the linear offset is named base for the other chips. map->surf
                  should be the base and map->linoff the offset for all chips */
        if (IS_PSB(dev)) {
                REG_WRITE(map->base, offset + start);
                REG_READ(map->base);
        } else {
                REG_WRITE(map->base, offset);
                REG_READ(map->base);
                REG_WRITE(map->surf, start);
                REG_READ(map->surf);
        }

gma_pipe_cleaner:
        /* If there was a previous display we can now unpin it */
        if (old_fb)
                psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));

gma_pipe_set_base_exit:
        gma_power_end(dev);
        return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
        int palreg = map->palette;
        u16 *r, *g, *b;
        int i;

        /* The clocks have to be on to load the palette. */
        if (!crtc->enabled)
                return;

        r = crtc->gamma_store;
        g = r + crtc->gamma_size;
        b = g + crtc->gamma_size;
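
        /* Each palette entry packs 8-bit red, green and blue into bits 23:16,
         * 15:8 and 7:0. The 16-bit gamma values are truncated to their high
         * byte and offset by the per-index lut_adj fixup. */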
        if (gma_power_begin(dev, false)) {
                for (i = 0; i < 256; i++) {
                        REG_WRITE(palreg + 4 * i,
                                  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
                                  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
                                  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
                }
                gma_power_end(dev);
        } else {
                for (i = 0; i < 256; i++) {
                        /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
                        dev_priv->regs.pipe[0].palette[i] =
                                (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
                                (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
                                ((*b++ >> 8) + gma_crtc->lut_adj[i]);
                }
        }
}

int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
                       u32 size,
                       struct drm_modeset_acquire_ctx *ctx)
{
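        /* The DRM core has already copied the new ramp into crtc->gamma_store
         * before calling this hook, so reloading the LUT from there is
         * sufficient; the red/green/blue arguments point into that store. */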
        gma_crtc_load_lut(crtc);

        return 0;
}

/**
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        u32 temp;

        /* XXX: When our outputs are all unaware of DPMS modes other than off
         * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
         */

        if (IS_CDV(dev))
                dev_priv->ops->disable_sr(dev);

        switch (mode) {
        case DRM_MODE_DPMS_ON:
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
                if (gma_crtc->active)
                        break;

                gma_crtc->active = true;

                /* Enable the DPLL */
                temp = REG_READ(map->dpll);
                if ((temp & DPLL_VCO_ENABLE) == 0) {
                        REG_WRITE(map->dpll, temp);
                        REG_READ(map->dpll);
                        /* Wait for the clocks to stabilize. */
                        udelay(150);
                        REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
                        REG_READ(map->dpll);
                        /* Wait for the clocks to stabilize. */
                        udelay(150);
                        REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
                        REG_READ(map->dpll);
                        /* Wait for the clocks to stabilize. */
                        udelay(150);
                }

                /* Enable the plane */
                temp = REG_READ(map->cntr);
                if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
                        REG_WRITE(map->cntr,
                                  temp | DISPLAY_PLANE_ENABLE);
                        /* Flush the plane changes */
                        REG_WRITE(map->base, REG_READ(map->base));
                }

                udelay(150);

                /* Enable the pipe */
                temp = REG_READ(map->conf);
                if ((temp & PIPEACONF_ENABLE) == 0)
                        REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

                temp = REG_READ(map->status);
                temp &= ~(0xFFFF);
                temp |= PIPE_FIFO_UNDERRUN;
                REG_WRITE(map->status, temp);
                REG_READ(map->status);

                gma_crtc_load_lut(crtc);

                /* Give the overlay scaler a chance to enable
                 * if it's on this pipe */
                /* psb_intel_crtc_dpms_video(crtc, true); TODO */
                break;
        case DRM_MODE_DPMS_OFF:
                if (!gma_crtc->active)
                        break;

                gma_crtc->active = false;

                /* Give the overlay scaler a chance to disable
                 * if it's on this pipe */
                /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

                /* Disable the VGA plane that we never use */
                REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

                /* Turn off vblank interrupts */
                drm_crtc_vblank_off(crtc);

                /* Wait for vblank for the disable to take effect */
                gma_wait_for_vblank(dev);

                /* Disable plane */
                temp = REG_READ(map->cntr);
                if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
                        REG_WRITE(map->cntr,
                                  temp & ~DISPLAY_PLANE_ENABLE);
                        /* Flush the plane changes */
                        REG_WRITE(map->base, REG_READ(map->base));
                        REG_READ(map->base);
                }

                /* Disable pipe */
                temp = REG_READ(map->conf);
                if ((temp & PIPEACONF_ENABLE) != 0) {
                        REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
                        REG_READ(map->conf);
                }

                /* Wait for vblank for the disable to take effect. */
                gma_wait_for_vblank(dev);

                udelay(150);

                /* Disable DPLL */
                temp = REG_READ(map->dpll);
                if ((temp & DPLL_VCO_ENABLE) != 0) {
                        REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
                        REG_READ(map->dpll);
                }

                /* Wait for the clocks to turn off. */
                udelay(150);
                break;
        }

        if (IS_CDV(dev))
                dev_priv->ops->update_wm(dev, crtc);

        /* Set FIFO watermarks */
        REG_WRITE(DSPARB, 0x3F3E);
}

int gma_crtc_cursor_set(struct drm_crtc *crtc,
                        struct drm_file *file_priv,
                        uint32_t handle,
                        uint32_t width, uint32_t height)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        int pipe = gma_crtc->pipe;
        uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
        uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
        uint32_t temp;
        size_t addr = 0;
        struct gtt_range *gt;
        struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
        struct drm_gem_object *obj;
        void *tmp_dst, *tmp_src;
        int ret = 0, i, cursor_pages;

        /* If we didn't get a handle then turn the cursor off */
        if (!handle) {
                temp = CURSOR_MODE_DISABLE;
                if (gma_power_begin(dev, false)) {
                        REG_WRITE(control, temp);
                        REG_WRITE(base, 0);
                        gma_power_end(dev);
                }

                /* Unpin the old GEM object */
                if (gma_crtc->cursor_obj) {
                        gt = container_of(gma_crtc->cursor_obj,
                                          struct gtt_range, gem);
                        psb_gtt_unpin(gt);
                        drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
                        gma_crtc->cursor_obj = NULL;
                }
                return 0;
        }

        /* Currently we only support 64x64 cursors */
        if (width != 64 || height != 64) {
                dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->size < width * height * 4) {
                dev_dbg(dev->dev, "Buffer is too small\n");
                ret = -ENOMEM;
                goto unref_cursor;
        }

        gt = container_of(obj, struct gtt_range, gem);

        /* Pin the memory into the GTT */
        ret = psb_gtt_pin(gt);
        if (ret) {
                dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
                goto unref_cursor;
        }

        if (dev_priv->ops->cursor_needs_phys) {
                if (cursor_gt == NULL) {
                        dev_err(dev->dev, "No hardware cursor mem available");
                        ret = -ENOMEM;
                        goto unref_cursor;
                }

                /* Prevent overflow */
                if (gt->npage > 4)
                        cursor_pages = 4;
                else
                        cursor_pages = gt->npage;
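
                /* A 64x64 ARGB cursor is 64 * 64 * 4 = 16 KiB, i.e. four
                 * 4 KiB pages, which is why the copy is capped at 4 pages. */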
                /* Copy the cursor to cursor mem */
                tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
                for (i = 0; i < cursor_pages; i++) {
                        tmp_src = kmap(gt->pages[i]);
                        memcpy(tmp_dst, tmp_src, PAGE_SIZE);
                        kunmap(gt->pages[i]);
                        tmp_dst += PAGE_SIZE;
                }

                addr = gma_crtc->cursor_addr;
        } else {
                addr = gt->offset;
                gma_crtc->cursor_addr = addr;
        }

        temp = 0;
        /* set the pipe for the cursor */
        temp |= (pipe << 28);
        temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

        if (gma_power_begin(dev, false)) {
                REG_WRITE(control, temp);
                REG_WRITE(base, addr);
                gma_power_end(dev);
        }

        /* unpin the old bo */
        if (gma_crtc->cursor_obj) {
                gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
                psb_gtt_unpin(gt);
                drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
        }

        gma_crtc->cursor_obj = obj;
unlock:
        return ret;

unref_cursor:
        drm_gem_object_put_unlocked(obj);
        return ret;
}

int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct drm_device *dev = crtc->dev;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        int pipe = gma_crtc->pipe;
        uint32_t temp = 0;
        uint32_t addr;
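
        /* The cursor position register uses sign-magnitude coordinates: the
         * magnitude goes into the CURSOR_POS_MASK field and negative values
         * additionally set the CURSOR_POS_SIGN bit for that axis. */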
        if (x < 0) {
                temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
                x = -x;
        }
        if (y < 0) {
                temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
                y = -y;
        }

        temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
        temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

        addr = gma_crtc->cursor_addr;

        if (gma_power_begin(dev, false)) {
                REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
                REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
                gma_power_end(dev);
        }
        return 0;
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
        const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
        const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
        struct gtt_range *gt;
        const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

        if (crtc->primary->fb) {
                gt = to_gtt_range(crtc->primary->fb->obj[0]);
                psb_gtt_unpin(gt);
        }
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

        kfree(gma_crtc->crtc_state);
        drm_crtc_cleanup(crtc);
        kfree(gma_crtc);
}

int gma_crtc_set_config(struct drm_mode_set *set,
                        struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = set->crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        int ret;

        if (!dev_priv->rpm_enabled)
                return drm_crtc_helper_set_config(set, ctx);
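
        /* Keep the device awake while the helper touches hardware: forbid
         * runtime suspend around the mode set, then allow it again. */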
        pm_runtime_forbid(&dev->pdev->dev);
        ret = drm_crtc_helper_set_config(set, ctx);
        pm_runtime_allow(&dev->pdev->dev);

        return ret;
}

/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
        const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
        uint32_t palette_reg;
        int i;

        if (!crtc_state) {
                dev_err(dev->dev, "No CRTC state found\n");
                return;
        }

        crtc_state->saveDSPCNTR = REG_READ(map->cntr);
        crtc_state->savePIPECONF = REG_READ(map->conf);
        crtc_state->savePIPESRC = REG_READ(map->src);
        crtc_state->saveFP0 = REG_READ(map->fp0);
        crtc_state->saveFP1 = REG_READ(map->fp1);
        crtc_state->saveDPLL = REG_READ(map->dpll);
        crtc_state->saveHTOTAL = REG_READ(map->htotal);
        crtc_state->saveHBLANK = REG_READ(map->hblank);
        crtc_state->saveHSYNC = REG_READ(map->hsync);
        crtc_state->saveVTOTAL = REG_READ(map->vtotal);
        crtc_state->saveVBLANK = REG_READ(map->vblank);
        crtc_state->saveVSYNC = REG_READ(map->vsync);
        crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

        /* NOTE: DSPSIZE DSPPOS only for psb */
        crtc_state->saveDSPSIZE = REG_READ(map->size);
        crtc_state->saveDSPPOS = REG_READ(map->pos);

        crtc_state->saveDSPBASE = REG_READ(map->base);

        palette_reg = map->palette;
        for (i = 0; i < 256; ++i)
                crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
        const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
        uint32_t palette_reg;
        int i;

        if (!crtc_state) {
                dev_err(dev->dev, "No crtc state\n");
                return;
        }
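
        /* If the saved state had the PLL running, drop the VCO enable first
         * so the dividers can be reprogrammed, then restore FP0/FP1, the
         * DPLL, the timing registers and finally the plane/pipe state. */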
        if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
                REG_WRITE(map->dpll,
                          crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
                REG_READ(map->dpll);
                udelay(150);
        }

        REG_WRITE(map->fp0, crtc_state->saveFP0);
        REG_READ(map->fp0);

        REG_WRITE(map->fp1, crtc_state->saveFP1);
        REG_READ(map->fp1);

        REG_WRITE(map->dpll, crtc_state->saveDPLL);
        REG_READ(map->dpll);
        udelay(150);

        REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
        REG_WRITE(map->hblank, crtc_state->saveHBLANK);
        REG_WRITE(map->hsync, crtc_state->saveHSYNC);
        REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
        REG_WRITE(map->vblank, crtc_state->saveVBLANK);
        REG_WRITE(map->vsync, crtc_state->saveVSYNC);
        REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

        REG_WRITE(map->size, crtc_state->saveDSPSIZE);
        REG_WRITE(map->pos, crtc_state->saveDSPPOS);

        REG_WRITE(map->src, crtc_state->savePIPESRC);
        REG_WRITE(map->base, crtc_state->saveDSPBASE);
        REG_WRITE(map->conf, crtc_state->savePIPECONF);

        gma_wait_for_vblank(dev);

        REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
        REG_WRITE(map->base, crtc_state->saveDSPBASE);

        gma_wait_for_vblank(dev);

        palette_reg = map->palette;
        for (i = 0; i < 256; ++i)
                REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
        const struct drm_encoder_helper_funcs *encoder_funcs =
                encoder->helper_private;
        /* lvds has its own version of prepare see psb_intel_lvds_prepare */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
        const struct drm_encoder_helper_funcs *encoder_funcs =
                encoder->helper_private;
        /* lvds has its own version of commit see psb_intel_lvds_commit */
        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
        struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

        drm_encoder_cleanup(encoder);
        kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
        struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

        return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
                                  struct gma_encoder *encoder)
{
        connector->encoder = encoder;
        drm_connector_attach_encoder(&connector->base,
                                     &encoder->base);
}

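/* Reject the candidate PLL settings; the DRM_ERROR is commented out so the
 * exhaustive search in gma_find_best_pll() stays quiet. */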
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
                      const struct gma_limit_t *limit,
                      struct gma_clock_t *clock)
{
        if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
                GMA_PLL_INVALID("p1 out of range");
        if (clock->p < limit->p.min || limit->p.max < clock->p)
                GMA_PLL_INVALID("p out of range");
        if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
                GMA_PLL_INVALID("m2 out of range");
        if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
                GMA_PLL_INVALID("m1 out of range");
        /* On CDV m1 is always 0 */
        if (clock->m1 <= clock->m2 && clock->m1 != 0)
                GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
        if (clock->m < limit->m.min || limit->m.max < clock->m)
                GMA_PLL_INVALID("m out of range");
        if (clock->n < limit->n.min || limit->n.max < clock->n)
                GMA_PLL_INVALID("n out of range");
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                GMA_PLL_INVALID("vco out of range");
        /* XXX: We may need to be checking "Dot clock"
         * depending on the multiplier, connector, etc.,
         * rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                GMA_PLL_INVALID("dot out of range");

        return true;
}

bool gma_find_best_pll(const struct gma_limit_t *limit,
                       struct drm_crtc *crtc, int target, int refclk,
                       struct gma_clock_t *best_clock)
{
        struct drm_device *dev = crtc->dev;
        const struct gma_clock_funcs *clock_funcs =
                to_gma_crtc(crtc)->clock_funcs;
        struct gma_clock_t clock;
        int err = target;

        if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
                /*
                 * For LVDS, if the panel is on, just rely on its current
                 * settings for dual-channel. We haven't figured out how to
                 * reliably set up different single/dual channel state, if we
                 * even can.
                 */
                if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
                    LVDS_CLKB_POWER_UP)
                        clock.p2 = limit->p2.p2_fast;
                else
                        clock.p2 = limit->p2.p2_slow;
        } else {
                if (target < limit->p2.dot_limit)
                        clock.p2 = limit->p2.p2_slow;
                else
                        clock.p2 = limit->p2.p2_fast;
        }

        memset(best_clock, 0, sizeof(*best_clock));
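
        /* Brute-force the divider space: for every m1/m2/n/p1 combination
         * that pll_is_valid() accepts, keep the one whose computed dot clock
         * is closest to the requested target frequency. */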
        /* m1 is always 0 on CDV so the outermost loop will run just once */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     (clock.m2 < clock.m1 || clock.m1 == 0) &&
                      clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                     clock.p1 <= limit->p1.max;
                                     clock.p1++) {
                                        int this_err;

                                        clock_funcs->clock(refclk, &clock);

                                        if (!clock_funcs->pll_is_valid(crtc,
                                                                limit, &clock))
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return err != target;
}
  647. }