psb_irq.c

/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 **************************************************************************/
/*
 */

#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "power.h"
#include "psb_irq.h"
#include "mdfld_output.h"

/*
 * inline functions
 */

static inline u32
psb_pipestat(int pipe)
{
        if (pipe == 0)
                return PIPEASTAT;
        if (pipe == 1)
                return PIPEBSTAT;
        if (pipe == 2)
                return PIPECSTAT;
        BUG();
}

static inline u32
mid_pipe_event(int pipe)
{
        if (pipe == 0)
                return _PSB_PIPEA_EVENT_FLAG;
        if (pipe == 1)
                return _MDFLD_PIPEB_EVENT_FLAG;
        if (pipe == 2)
                return _MDFLD_PIPEC_EVENT_FLAG;
        BUG();
}

static inline u32
mid_pipe_vsync(int pipe)
{
        if (pipe == 0)
                return _PSB_VSYNC_PIPEA_FLAG;
        if (pipe == 1)
                return _PSB_VSYNC_PIPEB_FLAG;
        if (pipe == 2)
                return _MDFLD_PIPEC_VBLANK_FLAG;
        BUG();
}

static inline u32
mid_pipeconf(int pipe)
{
        if (pipe == 0)
                return PIPEACONF;
        if (pipe == 1)
                return PIPEBCONF;
        if (pipe == 2)
                return PIPECCONF;
        BUG();
}

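/*
 * Enable the interrupt sources in @mask for @pipe: update the cached
 * pipestat value and, if the display island can be powered up, write it
 * to the PIPExSTAT register, clearing any pending status bits.
 */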
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != mask) {
                u32 reg = psb_pipestat(pipe);
                dev_priv->pipestat[pipe] |= mask;
                /* Enable the interrupt, clear any pending status */
                if (gma_power_begin(dev_priv->dev, false)) {
                        u32 writeVal = PSB_RVDC32(reg);
                        writeVal |= (mask | (mask >> 16));
                        PSB_WVDC32(writeVal, reg);
                        (void) PSB_RVDC32(reg);
                        gma_power_end(dev_priv->dev);
                }
        }
}

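/*
 * Disable the interrupt sources in @mask for @pipe, updating the cached
 * pipestat value and the PIPExSTAT register when the display is powered.
 */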
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
        if ((dev_priv->pipestat[pipe] & mask) != 0) {
                u32 reg = psb_pipestat(pipe);
                dev_priv->pipestat[pipe] &= ~mask;
                if (gma_power_begin(dev_priv->dev, false)) {
                        u32 writeVal = PSB_RVDC32(reg);
                        writeVal &= ~mask;
                        PSB_WVDC32(writeVal, reg);
                        (void) PSB_RVDC32(reg);
                        gma_power_end(dev_priv->dev);
                }
        }
}

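/*
 * Add the pipe's event flag to the first-level VDC interrupt mask and
 * program the hardware mask/enable registers accordingly.
 */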
static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
        if (gma_power_begin(dev_priv->dev, false)) {
                u32 pipe_event = mid_pipe_event(pipe);
                dev_priv->vdc_irq_mask |= pipe_event;
                PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
                PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
                gma_power_end(dev_priv->dev);
        }
}

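/*
 * Remove the pipe's event flag from the first-level VDC interrupt mask,
 * but only once no second-level sources remain enabled for that pipe.
 */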
static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
        if (dev_priv->pipestat[pipe] == 0) {
                if (gma_power_begin(dev_priv->dev, false)) {
                        u32 pipe_event = mid_pipe_event(pipe);
                        dev_priv->vdc_irq_mask &= ~pipe_event;
                        PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
                        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
                        gma_power_end(dev_priv->dev);
                }
        }
}

/**
 * Display controller interrupt handler for pipe event.
 */
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;

        uint32_t pipe_stat_val = 0;
        uint32_t pipe_stat_reg = psb_pipestat(pipe);
        uint32_t pipe_enable = dev_priv->pipestat[pipe];
        uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
        uint32_t pipe_clear;
        uint32_t i = 0;

        spin_lock(&dev_priv->irqmask_lock);

        pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
        pipe_stat_val &= pipe_enable | pipe_status;
        pipe_stat_val &= pipe_stat_val >> 16;

        spin_unlock(&dev_priv->irqmask_lock);

        /* Clear the 2nd level interrupt status bits
         * Sometimes the bits are very sticky so we repeat until they unstick */
        for (i = 0; i < 0xffff; i++) {
                PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
                pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

                if (pipe_clear == 0)
                        break;
        }

        if (pipe_clear)
                dev_err(dev->dev,
                        "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
                        __func__, pipe, PSB_RVDC32(pipe_stat_reg));

        if (pipe_stat_val & PIPE_VBLANK_STATUS)
                drm_handle_vblank(dev, pipe);

        if (pipe_stat_val & PIPE_TE_STATUS)
                drm_handle_vblank(dev, pipe);
}

/*
 * Display controller interrupt handler.
 */
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
        if (vdc_stat & _PSB_IRQ_ASLE)
                psb_intel_opregion_asle_intr(dev);

        if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
                mid_pipe_event_handler(dev, 0);

        if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
                mid_pipe_event_handler(dev, 1);
}

/*
 * SGX interrupt handler
 */
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        u32 val, addr;
        int error = false;

        if (stat_1 & _PSB_CE_TWOD_COMPLETE)
                val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

        if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
                val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
                addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
                if (val) {
                        if (val & _PSB_CBI_STAT_PF_N_RW)
                                DRM_ERROR("SGX MMU page fault:");
                        else
                                DRM_ERROR("SGX MMU read / write protection fault:");

                        if (val & _PSB_CBI_STAT_FAULT_CACHE)
                                DRM_ERROR("\tCache requestor");
                        if (val & _PSB_CBI_STAT_FAULT_TA)
                                DRM_ERROR("\tTA requestor");
                        if (val & _PSB_CBI_STAT_FAULT_VDM)
                                DRM_ERROR("\tVDM requestor");
                        if (val & _PSB_CBI_STAT_FAULT_2D)
                                DRM_ERROR("\t2D requestor");
                        if (val & _PSB_CBI_STAT_FAULT_PBE)
                                DRM_ERROR("\tPBE requestor");
                        if (val & _PSB_CBI_STAT_FAULT_TSP)
                                DRM_ERROR("\tTSP requestor");
                        if (val & _PSB_CBI_STAT_FAULT_ISP)
                                DRM_ERROR("\tISP requestor");
                        if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
                                DRM_ERROR("\tUSSEPDS requestor");
                        if (val & _PSB_CBI_STAT_FAULT_HOST)
                                DRM_ERROR("\tHost requestor");

                        DRM_ERROR("\tMMU failing address is 0x%08x.\n",
                                  (unsigned int)addr);
                        error = true;
                }
        }

        /* Clear bits */
        PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
        PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
        PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}

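/*
 * Top-level interrupt handler: read the VDC interrupt identity register
 * and dispatch to the display, SGX and hotplug handlers as appropriate,
 * then acknowledge the handled bits.
 */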
irqreturn_t psb_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct drm_psb_private *dev_priv = dev->dev_private;
        uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
        u32 sgx_stat_1, sgx_stat_2;
        int handled = 0;

        spin_lock(&dev_priv->irqmask_lock);

        vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

        if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
                dsp_int = 1;

        /* FIXME: Handle Medfield
        if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
                dsp_int = 1;
        */

        if (vdc_stat & _PSB_IRQ_SGX_FLAG)
                sgx_int = 1;
        if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
                hotplug_int = 1;

        vdc_stat &= dev_priv->vdc_irq_mask;
        spin_unlock(&dev_priv->irqmask_lock);

        if (dsp_int && gma_power_is_on(dev)) {
                psb_vdc_interrupt(dev, vdc_stat);
                handled = 1;
        }

        if (sgx_int) {
                sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
                sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
                psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
                handled = 1;
        }

        /* Note: this bit has other meanings on some devices, so we will
           need to address that later if it ever matters */
        if (hotplug_int && dev_priv->ops->hotplug) {
                handled = dev_priv->ops->hotplug(dev);
                REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
        }

        PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
        (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
        rmb();

        if (!handled)
                return IRQ_NONE;

        return IRQ_HANDLED;
}

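/*
 * Called by the DRM core before the interrupt handler is installed:
 * mask and disable all interrupt sources and build the initial
 * vdc_irq_mask from the current vblank and hotplug state.
 */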
void psb_irq_preinstall(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        if (gma_power_is_on(dev)) {
                PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
                PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
                PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
                PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
                PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
        }
        if (dev->vblank[0].enabled)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
        if (dev->vblank[1].enabled)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

        /* FIXME: Handle Medfield irq mask
        if (dev->vblank[1].enabled)
                dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
        if (dev->vblank[2].enabled)
                dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
        */

        /* Revisit this area - want per device masks ? */
        if (dev_priv->ops->hotplug)
                dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
        dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

        /* This register is safe even if display island is off */
        PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

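/*
 * Called by the DRM core after the interrupt handler is installed:
 * enable the SGX 2D/MMU fault sources, unmask the selected VDC
 * interrupts and restore per-pipe vblank and hotplug state.
 */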
int psb_irq_postinstall(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        /* Enable 2D and MMU fault interrupts */
        PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
        PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
        PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

        /* This register is safe even if display island is off */
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

        for (i = 0; i < dev->num_crtcs; ++i) {
                if (dev->vblank[i].enabled)
                        psb_enable_pipestat(dev_priv, i,
                                            PIPE_VBLANK_INTERRUPT_ENABLE);
                else
                        psb_disable_pipestat(dev_priv, i,
                                             PIPE_VBLANK_INTERRUPT_ENABLE);
        }

        if (dev_priv->ops->hotplug_enable)
                dev_priv->ops->hotplug_enable(dev, true);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
        return 0;
}

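/*
 * Called by the DRM core when the interrupt handler is being removed:
 * disable the hotplug and vblank sources, keep only the SGX/MSVDX/Topaz
 * flags in the mask, then clear any pending identity bits.
 */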
void psb_irq_uninstall(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        unsigned int i;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        if (dev_priv->ops->hotplug_enable)
                dev_priv->ops->hotplug_enable(dev, false);

        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

        for (i = 0; i < dev->num_crtcs; ++i) {
                if (dev->vblank[i].enabled)
                        psb_disable_pipestat(dev_priv, i,
                                             PIPE_VBLANK_INTERRUPT_ENABLE);
        }

        dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
                                  _PSB_IRQ_MSVDX_FLAG |
                                  _LNC_IRQ_TOPAZ_FLAG;

        /* These two registers are safe even if display island is off */
        PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

        wmb();

        /* This register is safe even if display island is off */
        PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

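/*
 * Program the DPST histogram and PWM control logic and enable the DPST
 * event interrupt on pipe A.
 */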
void psb_irq_turn_on_dpst(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        u32 hist_reg;
        u32 pwm_reg;

        if (gma_power_begin(dev, false)) {
                PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
                hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
                PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
                hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);

                PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
                pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
                PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
                                   | PWM_PHASEIN_INT_ENABLE,
                           PWM_CONTROL_LOGIC);
                pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);

                psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);

                hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
                PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
                           HISTOGRAM_INT_CONTROL);
                pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
                PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
                           PWM_CONTROL_LOGIC);

                gma_power_end(dev);
        }
}

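/*
 * Enable DPST interrupt delivery: turn on the pipe A event source and
 * program the DPST hardware, under the irqmask lock.
 */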
int psb_irq_enable_dpst(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        /* enable DPST */
        mid_enable_pipe_event(dev_priv, 0);
        psb_irq_turn_on_dpst(dev);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
        return 0;
}

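/*
 * Disable the DPST histogram interrupt, the PWM phase-in interrupt and
 * the DPST event on pipe A.
 */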
void psb_irq_turn_off_dpst(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        u32 hist_reg;
        u32 pwm_reg;

        if (gma_power_begin(dev, false)) {
                PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
                hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);

                psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);

                pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
                PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
                           PWM_CONTROL_LOGIC);
                pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);

                gma_power_end(dev);
        }
}

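/*
 * Disable DPST interrupt delivery, the inverse of psb_irq_enable_dpst().
 */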
int psb_irq_disable_dpst(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        mid_disable_pipe_event(dev_priv, 0);
        psb_irq_turn_off_dpst(dev);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
        return 0;
}

/*
 * Enable the VBLANK interrupt on the given pipe.
 */
int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        uint32_t reg_val = 0;
        uint32_t pipeconf_reg = mid_pipeconf(pipe);

        /* Medfield is different - we should perhaps extract out vblank
           and backlight etc ops */
        if (IS_MFLD(dev))
                return mdfld_enable_te(dev, pipe);

        if (gma_power_begin(dev, false)) {
                reg_val = REG_READ(pipeconf_reg);
                gma_power_end(dev);
        }

        if (!(reg_val & PIPEACONF_ENABLE))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        if (pipe == 0)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
        else if (pipe == 1)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

        PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
        psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

        return 0;
}

/*
 * Disable the VBLANK interrupt on the given pipe.
 */
void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (IS_MFLD(dev))
                mdfld_disable_te(dev, pipe);

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        if (pipe == 0)
                dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
        else if (pipe == 1)
                dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;

        PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
        psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/*
 * Enable the TE (tearing effect) interrupt on the given pipe.
 */
int mdfld_enable_te(struct drm_device *dev, int pipe)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        unsigned long irqflags;
        uint32_t reg_val = 0;
        uint32_t pipeconf_reg = mid_pipeconf(pipe);

        if (gma_power_begin(dev, false)) {
                reg_val = REG_READ(pipeconf_reg);
                gma_power_end(dev);
        }

        if (!(reg_val & PIPEACONF_ENABLE))
                return -EINVAL;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        mid_enable_pipe_event(dev_priv, pipe);
        psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

        return 0;
}

/*
 * Disable the TE (tearing effect) interrupt on the given pipe.
 */
void mdfld_disable_te(struct drm_device *dev, int pipe)
{
        struct drm_psb_private *dev_priv =
                (struct drm_psb_private *) dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->dsr_enable)
                return;

        spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

        mid_disable_pipe_event(dev_priv, pipe);
        psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
        uint32_t high_frame = PIPEAFRAMEHIGH;
        uint32_t low_frame = PIPEAFRAMEPIXEL;
        uint32_t pipeconf_reg = PIPEACONF;
        uint32_t reg_val = 0;
        uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

        switch (pipe) {
        case 0:
                break;
        case 1:
                high_frame = PIPEBFRAMEHIGH;
                low_frame = PIPEBFRAMEPIXEL;
                pipeconf_reg = PIPEBCONF;
                break;
        case 2:
                high_frame = PIPECFRAMEHIGH;
                low_frame = PIPECFRAMEPIXEL;
                pipeconf_reg = PIPECCONF;
                break;
        default:
                dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
                return 0;
        }

        if (!gma_power_begin(dev, false))
                return 0;

        reg_val = REG_READ(pipeconf_reg);

        if (!(reg_val & PIPEACONF_ENABLE)) {
                dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
                        pipe);
                goto psb_get_vblank_counter_exit;
        }

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
                low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
                       PIPE_FRAME_LOW_SHIFT);
                high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
                         PIPE_FRAME_HIGH_SHIFT);
        } while (high1 != high2);

        count = (high1 << 8) | low;

psb_get_vblank_counter_exit:
        gma_power_end(dev);

        return count;
}