nv_accel.c

/***************************************************************************\
|*                                                                           *|
|*      Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.      *|
|*                                                                           *|
|*   NOTICE TO USER: The source code is copyrighted under U.S. and          *|
|*   international laws. Users and possessors of this source code are       *|
|*   hereby granted a nonexclusive, royalty-free copyright license to       *|
|*   use this code in individual and commercial software.                   *|
|*                                                                           *|
|*   Any use of this source code must include, in the user documenta-       *|
|*   tion and internal comments to the code, notices to the end user        *|
|*   as follows:                                                            *|
|*                                                                           *|
|*   Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.         *|
|*                                                                           *|
|*   NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY      *|
|*   OF THIS SOURCE CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS"           *|
|*   WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.  NVIDIA, CORPOR-      *|
|*   ATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE,        *|
|*   INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE-      *|
|*   MENT, AND FITNESS FOR A PARTICULAR PURPOSE.  IN NO EVENT SHALL         *|
|*   NVIDIA, CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT, INCI-         *|
|*   DENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RE-        *|
|*   SULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION        *|
|*   OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF       *|
|*   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.      *|
|*                                                                           *|
|*   U.S. Government End Users.  This source code is a "commercial          *|
|*   item," as that term is defined at 48 C.F.R. 2.101 (OCT 1995),          *|
|*   consisting of "commercial computer software" and "commercial           *|
|*   computer software documentation," as such terms are used in            *|
|*   48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Govern-       *|
|*   ment only as a commercial end item.  Consistent with 48 C.F.R.         *|
|*   12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995),        *|
|*   all U.S. Government End Users acquire the source code with only        *|
|*   those rights set forth herein.                                         *|
|*                                                                           *|
\***************************************************************************/

/*
 * GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
 * XFree86 'nv' driver, this source code is provided under MIT-style licensing
 * where the source code is provided "as is" without warranty of any kind.
 * The only usage restriction is for the copyright notices to be retained
 * whenever code is used.
 *
 * Antonino Daplas <adaplas@pol.net> 2005-03-11
 */

#include <linux/fb.h>
#include <linux/nmi.h>

#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#include "nv_local.h"

/*
 * There is a HW race condition with videoram command buffers: the engine
 * cannot jump directly to the location of the PUT offset.  To work around
 * this, PUT is written at the jump offset + SKIPS dwords, with no-op
 * padding in between.
 */
#define SKIPS 8

static const int NVCopyROP[16] = {
        0xCC,                   /* copy */
        0x55                    /* invert */
};

static const int NVCopyROP_PM[16] = {
        0xCA,                   /* copy */
        0x5A,                   /* invert */
};
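
/*
 * Give up on acceleration: mark the engine as locked up so subsequent
 * drawing falls back to the software cfb_* paths, and drop the pixmap
 * scan_align back to 1.
 */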
static inline void nvidiafb_safe_mode(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        touch_softlockup_watchdog();
        info->pixmap.scan_align = 1;
        par->lockup = 1;
}
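
/*
 * Wait for the engine's GET pointer to catch up with our PUT pointer,
 * i.e. for all queued commands to have been fetched.  On timeout the
 * engine is assumed to be hung and safe mode is entered.
 */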
static inline void NVFlush(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && READ_GET(par) != par->dmaPut) ;

        if (!count) {
                printk("nvidiafb: DMA Flush lockup\n");
                nvidiafb_safe_mode(info);
        }
}
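
/*
 * Wait until the PGRAPH register at offset 0x0700 reads zero, i.e. the
 * graphics engine has gone idle.  On timeout, enter safe mode.
 */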
static inline void NVSync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        int count = 1000000000;

        while (--count && NV_RD32(par->PGRAPH, 0x0700)) ;

        if (!count) {
                printk("nvidiafb: DMA Sync lockup\n");
                nvidiafb_safe_mode(info);
        }
}
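
/*
 * Make any commands queued since the last kickoff visible to the hardware
 * by advancing the PUT pointer to the current write position.
 */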
static void NVDmaKickoff(struct nvidia_par *par)
{
        if (par->dmaCurrent != par->dmaPut) {
                par->dmaPut = par->dmaCurrent;
                WRITE_PUT(par, par->dmaPut);
        }
}
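
/*
 * Wait until at least 'size' free dwords are available in the command
 * buffer.  Free space is recomputed from the hardware GET pointer; when
 * the write position reaches the end of the buffer, a jump back to the
 * SKIPS area is emitted (see the comment above the SKIPS definition) and
 * the PUT/current pointers are wrapped accordingly.
 */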
static void NVDmaWait(struct fb_info *info, int size)
{
        struct nvidia_par *par = info->par;
        int dmaGet;
        int count = 1000000000, cnt;
        size++;

        while (par->dmaFree < size && --count && !par->lockup) {
                dmaGet = READ_GET(par);

                if (par->dmaPut >= dmaGet) {
                        par->dmaFree = par->dmaMax - par->dmaCurrent;
                        if (par->dmaFree < size) {
                                NVDmaNext(par, 0x20000000);
                                if (dmaGet <= SKIPS) {
                                        if (par->dmaPut <= SKIPS)
                                                WRITE_PUT(par, SKIPS + 1);
                                        cnt = 1000000000;
                                        do {
                                                dmaGet = READ_GET(par);
                                        } while (--cnt && dmaGet <= SKIPS);
                                        if (!cnt) {
                                                printk("DMA Get lockup\n");
                                                par->lockup = 1;
                                        }
                                }
                                WRITE_PUT(par, SKIPS);
                                par->dmaCurrent = par->dmaPut = SKIPS;
                                par->dmaFree = dmaGet - (SKIPS + 1);
                        }
                } else
                        par->dmaFree = dmaGet - par->dmaCurrent - 1;
        }

        if (!count) {
                printk("nvidiafb: DMA Wait Lockup\n");
                nvidiafb_safe_mode(info);
        }
}
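
/* Program the two pattern colors and the two 32-bit pattern bitmaps. */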
static void NVSetPattern(struct fb_info *info, u32 clr0, u32 clr1,
                         u32 pat0, u32 pat1)
{
        struct nvidia_par *par = info->par;

        NVDmaStart(info, par, PATTERN_COLOR_0, 4);
        NVDmaNext(par, clr0);
        NVDmaNext(par, clr1);
        NVDmaNext(par, pat0);
        NVDmaNext(par, pat1);
}
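
/*
 * Select the raster operation for solid fills.  With a full planemask the
 * plain copy/invert ROPs are used; otherwise the planemask is loaded as a
 * pattern and the pattern-based ROP variants are selected.  The current
 * ROP is cached in par->currentRop to avoid redundant updates.
 */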
static void NVSetRopSolid(struct fb_info *info, u32 rop, u32 planemask)
{
        struct nvidia_par *par = info->par;

        if (planemask != ~0) {
                NVSetPattern(info, 0, planemask, ~0, ~0);
                if (par->currentRop != (rop + 32)) {
                        NVDmaStart(info, par, ROP_SET, 1);
                        NVDmaNext(par, NVCopyROP_PM[rop]);
                        par->currentRop = rop + 32;
                }
        } else if (par->currentRop != rop) {
                if (par->currentRop >= 16)
                        NVSetPattern(info, ~0, ~0, ~0, ~0);
                NVDmaStart(info, par, ROP_SET, 1);
                NVDmaNext(par, NVCopyROP[rop]);
                par->currentRop = rop;
        }
}
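
/* Set the hardware clip rectangle from inclusive corner coordinates. */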
static void NVSetClippingRectangle(struct fb_info *info, int x1, int y1,
                                   int x2, int y2)
{
        struct nvidia_par *par = info->par;
        int h = y2 - y1 + 1;
        int w = x2 - x1 + 1;

        NVDmaStart(info, par, CLIP_POINT, 2);
        NVDmaNext(par, (y1 << 16) | x1);
        NVDmaNext(par, (h << 16) | w);
}
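
/*
 * (Re)initialize the acceleration engine state: place the DMA command
 * buffer at the end of usable video memory, emit the initial object
 * bindings, program the surface/pattern/rect/line formats for the current
 * bit depth, and reset the ROP and clip rectangle before kicking off.
 */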
void NVResetGraphics(struct fb_info *info)
{
        struct nvidia_par *par = info->par;
        u32 surfaceFormat, patternFormat, rectFormat, lineFormat;
        int pitch, i;

        pitch = info->fix.line_length;

        par->dmaBase = (u32 __iomem *) (&par->FbStart[par->FbUsableSize]);

        for (i = 0; i < SKIPS; i++)
                NV_WR32(&par->dmaBase[i], 0, 0x00000000);

        NV_WR32(&par->dmaBase[0x0 + SKIPS], 0, 0x00040000);
        NV_WR32(&par->dmaBase[0x1 + SKIPS], 0, 0x80000010);
        NV_WR32(&par->dmaBase[0x2 + SKIPS], 0, 0x00042000);
        NV_WR32(&par->dmaBase[0x3 + SKIPS], 0, 0x80000011);
        NV_WR32(&par->dmaBase[0x4 + SKIPS], 0, 0x00044000);
        NV_WR32(&par->dmaBase[0x5 + SKIPS], 0, 0x80000012);
        NV_WR32(&par->dmaBase[0x6 + SKIPS], 0, 0x00046000);
        NV_WR32(&par->dmaBase[0x7 + SKIPS], 0, 0x80000013);
        NV_WR32(&par->dmaBase[0x8 + SKIPS], 0, 0x00048000);
        NV_WR32(&par->dmaBase[0x9 + SKIPS], 0, 0x80000014);
        NV_WR32(&par->dmaBase[0xA + SKIPS], 0, 0x0004A000);
        NV_WR32(&par->dmaBase[0xB + SKIPS], 0, 0x80000015);
        NV_WR32(&par->dmaBase[0xC + SKIPS], 0, 0x0004C000);
        NV_WR32(&par->dmaBase[0xD + SKIPS], 0, 0x80000016);
        NV_WR32(&par->dmaBase[0xE + SKIPS], 0, 0x0004E000);
        NV_WR32(&par->dmaBase[0xF + SKIPS], 0, 0x80000017);

        par->dmaPut = 0;
        par->dmaCurrent = 16 + SKIPS;
        par->dmaMax = 8191;
        par->dmaFree = par->dmaMax - par->dmaCurrent;

        switch (info->var.bits_per_pixel) {
        case 32:
        case 24:
                surfaceFormat = SURFACE_FORMAT_DEPTH24;
                patternFormat = PATTERN_FORMAT_DEPTH24;
                rectFormat = RECT_FORMAT_DEPTH24;
                lineFormat = LINE_FORMAT_DEPTH24;
                break;
        case 16:
                surfaceFormat = SURFACE_FORMAT_DEPTH16;
                patternFormat = PATTERN_FORMAT_DEPTH16;
                rectFormat = RECT_FORMAT_DEPTH16;
                lineFormat = LINE_FORMAT_DEPTH16;
                break;
        default:
                surfaceFormat = SURFACE_FORMAT_DEPTH8;
                patternFormat = PATTERN_FORMAT_DEPTH8;
                rectFormat = RECT_FORMAT_DEPTH8;
                lineFormat = LINE_FORMAT_DEPTH8;
                break;
        }

        NVDmaStart(info, par, SURFACE_FORMAT, 4);
        NVDmaNext(par, surfaceFormat);
        NVDmaNext(par, pitch | (pitch << 16));
        NVDmaNext(par, 0);
        NVDmaNext(par, 0);

        NVDmaStart(info, par, PATTERN_FORMAT, 1);
        NVDmaNext(par, patternFormat);

        NVDmaStart(info, par, RECT_FORMAT, 1);
        NVDmaNext(par, rectFormat);

        NVDmaStart(info, par, LINE_FORMAT, 1);
        NVDmaNext(par, lineFormat);

        par->currentRop = ~0;   /* set to something invalid */
        NVSetRopSolid(info, ROP_COPY, ~0);

        NVSetClippingRectangle(info, 0, 0, info->var.xres_virtual,
                               info->var.yres_virtual);

        NVDmaKickoff(par);
}
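
/*
 * fb_sync entry point: drain the command buffer and wait for the engine
 * to go idle so the CPU can safely touch the framebuffer again.
 */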
int nvidiafb_sync(struct fb_info *info)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return 0;

        if (!par->lockup)
                NVFlush(info);

        if (!par->lockup)
                NVSync(info);

        return 0;
}
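
/*
 * fb_copyarea entry point: screen-to-screen blit through the engine,
 * falling back to cfb_copyarea after a detected lockup.
 */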
void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_copyarea(info, region);
                return;
        }

        NVDmaStart(info, par, BLIT_POINT_SRC, 3);
        NVDmaNext(par, (region->sy << 16) | region->sx);
        NVDmaNext(par, (region->dy << 16) | region->dx);
        NVDmaNext(par, (region->height << 16) | region->width);

        NVDmaKickoff(par);
}
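
/*
 * fb_fillrect entry point: solid rectangle fill.  A non-copy ROP is
 * programmed for the fill and restored to ROP_COPY afterwards; on lockup
 * the software cfb_fillrect path is used instead.
 */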
void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
        struct nvidia_par *par = info->par;
        u32 color;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (par->lockup) {
                cfb_fillrect(info, rect);
                return;
        }

        if (info->var.bits_per_pixel == 8)
                color = rect->color;
        else
                color = ((u32 *) info->pseudo_palette)[rect->color];

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, rect->rop, ~0);

        NVDmaStart(info, par, RECT_SOLID_COLOR, 1);
        NVDmaNext(par, color);

        NVDmaStart(info, par, RECT_SOLID_RECTS(0), 2);
        NVDmaNext(par, (rect->dx << 16) | rect->dy);
        NVDmaNext(par, (rect->width << 16) | rect->height);

        NVDmaKickoff(par);

        if (rect->rop != ROP_COPY)
                NVSetRopSolid(info, ROP_COPY, ~0);
}
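
/*
 * Draw a 1 bpp image by color expansion: the source bitmap is streamed to
 * the engine in RECT_EXPAND_TWO_COLOR_DATA bursts (byte order fixed up
 * with reverse_order()) using the given foreground/background colors.
 */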
static void nvidiafb_mono_color_expand(struct fb_info *info,
                                       const struct fb_image *image)
{
        struct nvidia_par *par = info->par;
        u32 fg, bg, mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        u32 dsize, width, *data = (u32 *) image->data, tmp;
        int j, k = 0;

        width = (image->width + 31) & ~31;
        dsize = (width * image->height) >> 5;

        if (info->var.bits_per_pixel == 8) {
                fg = image->fg_color | mask;
                bg = image->bg_color | mask;
        } else {
                fg = ((u32 *) info->pseudo_palette)[image->fg_color] | mask;
                bg = ((u32 *) info->pseudo_palette)[image->bg_color] | mask;
        }

        NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_CLIP, 7);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));
        NVDmaNext(par, ((image->dy + image->height) << 16) |
                  ((image->dx + image->width) & 0xffff));
        NVDmaNext(par, bg);
        NVDmaNext(par, fg);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->height << 16) | width);
        NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));

        while (dsize >= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0),
                           RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS);

                for (j = RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }

                dsize -= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS;
        }

        if (dsize) {
                NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0), dsize);

                for (j = dsize; j--;) {
                        tmp = data[k++];
                        reverse_order(&tmp);
                        NVDmaNext(par, tmp);
                }
        }

        NVDmaKickoff(par);
}
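
/*
 * fb_imageblit entry point: accelerate monochrome (depth 1) images via
 * color expansion; everything else goes through cfb_imageblit.
 */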
void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
        struct nvidia_par *par = info->par;

        if (info->state != FBINFO_STATE_RUNNING)
                return;

        if (image->depth == 1 && !par->lockup)
                nvidiafb_mono_color_expand(info, image);
        else
                cfb_imageblit(info, image);
}