atom.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Stanislaw Skowronek
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/unaligned.h>

#define ATOM_DEBUG

#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
#include "radeon.h"

#define ATOM_COND_ABOVE		0
#define ATOM_COND_ABOVEOREQUAL	1
#define ATOM_COND_ALWAYS	2
#define ATOM_COND_BELOW		3
#define ATOM_COND_BELOWOREQUAL	4
#define ATOM_COND_EQUAL		5
#define ATOM_COND_NOTEQUAL	6

#define ATOM_PORT_ATI	0
#define ATOM_PORT_PCI	1
#define ATOM_PORT_SYSIO	2

#define ATOM_UNIT_MICROSEC	0
#define ATOM_UNIT_MILLISEC	1

#define PLL_INDEX	2
#define PLL_DATA	3

typedef struct {
	struct atom_context *ctx;
	uint32_t *ps, *ws;
	int ps_shift;
	uint16_t start;
	unsigned last_jump;
	unsigned long last_jump_jiffies;
	bool abort;
} atom_exec_context;

int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);

static uint32_t atom_arg_mask[8] = {
	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };

static int debug_depth = 0;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
	while (n--)
		printk(" ");
}

#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
#endif

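/*
 * Indirect IO (IIO) interpreter.  Each IIO table in the BIOS is a small
 * bytecode program (nop, read, write, set/clear bits, move index/data/attr
 * fields); this loop walks it until ATOM_IIO_END and returns the value
 * accumulated in 'temp'.
 */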
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	uint32_t temp = 0xCDCDCDCD;

	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			if (rdev->family == CHIP_RV515)
				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((index >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((data >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
			temp |= ((ctx->io_attr >> CU8(base + 2)) &
				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			pr_info("Unknown IIO opcode\n");
			return 0;
		}
}

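/*
 * Fetch one source operand.  The low three bits of 'attr' select the
 * argument type (register, parameter space, workspace, data-table ID,
 * frame-buffer scratch, immediate, PLL or MC register) and the next three
 * bits select the byte/word alignment used to mask and shift the value.
 * If 'saved' is non-NULL the unmasked value is stored there so the caller
 * can later merge a result back into the untouched bits.
 */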
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;

	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return 0;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return 0;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				pr_info("Undefined indirect IO read method %d\n",
					gctx->io_mode & 0x7F);
				return 0;
			}
			val = atom_iio_execute(gctx,
					       gctx->iio[gctx->io_mode & 0x7F],
					       idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* get_unaligned_le32 avoids unaligned accesses from atombios
		 * tables, noticed on a DEC Alpha. */
		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		case ATOM_WS_REGPTR:
			val = gctx->reg_block;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
			val = 0;
		} else
			val = gctx->scratch[(gctx->fb_base / 4) + idx];
		if (print)
			DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}

static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	uint32_t align = (attr >> 3) & 7, arg = attr & 7;

	switch (arg) {
	case ATOM_ARG_REG:
	case ATOM_ARG_ID:
		(*ptr) += 2;
		break;
	case ATOM_ARG_PLL:
	case ATOM_ARG_MC:
	case ATOM_ARG_PS:
	case ATOM_ARG_WS:
	case ATOM_ARG_FB:
		(*ptr)++;
		break;
	case ATOM_ARG_IMM:
		switch (align) {
		case ATOM_SRC_DWORD:
			(*ptr) += 4;
			return;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			(*ptr) += 2;
			return;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			(*ptr)++;
			return;
		}
		return;
	}
}

static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}

static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
{
	uint32_t val = 0xCDCDCDCD;

	switch (align) {
	case ATOM_SRC_DWORD:
		val = U32(*ptr);
		(*ptr) += 4;
		break;
	case ATOM_SRC_WORD0:
	case ATOM_SRC_WORD8:
	case ATOM_SRC_WORD16:
		val = U16(*ptr);
		(*ptr) += 2;
		break;
	case ATOM_SRC_BYTE0:
	case ATOM_SRC_BYTE8:
	case ATOM_SRC_BYTE16:
	case ATOM_SRC_BYTE24:
		val = U8(*ptr);
		(*ptr)++;
		break;
	}
	return val;
}

static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}

static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3, ptr);
}

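/*
 * Write one destination operand.  'saved' holds the full value previously
 * read from the destination; the new value is shifted and masked into the
 * destination's alignment window and merged with the remaining bits of
 * 'saved' before being written back.
 */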
static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			 int *ptr, uint32_t val, uint32_t saved)
{
	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
	uint32_t old_val = val, idx;
	struct atom_context *gctx = ctx->ctx;

	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
	val <<= atom_arg_shift[align];
	val &= atom_arg_mask[align];
	saved &= ~atom_arg_mask[align];
	val |= saved;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			if (idx == 0)
				gctx->card->reg_write(gctx->card, idx, val << 2);
			else
				gctx->card->reg_write(gctx->card, idx, val);
			break;
		case ATOM_IO_PCI:
			pr_info("PCI registers are not implemented\n");
			return;
		case ATOM_IO_SYSIO:
			pr_info("SYSIO registers are not implemented\n");
			return;
		default:
			if (!(gctx->io_mode & 0x80)) {
				pr_info("Bad IO mode\n");
				return;
			}
			if (!gctx->iio[gctx->io_mode & 0xFF]) {
				pr_info("Undefined indirect IO write method %d\n",
					gctx->io_mode & 0x7F);
				return;
			}
			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
					 idx, val);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PS[0x%02X]", idx);
		ctx->ps[idx] = cpu_to_le32(val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("WS[0x%02X]", idx);
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			gctx->divmul[0] = val;
			break;
		case ATOM_WS_REMAINDER:
			gctx->divmul[1] = val;
			break;
		case ATOM_WS_DATAPTR:
			gctx->data_block = val;
			break;
		case ATOM_WS_SHIFT:
			gctx->shift = val;
			break;
		case ATOM_WS_OR_MASK:
		case ATOM_WS_AND_MASK:
			break;
		case ATOM_WS_FB_WINDOW:
			gctx->fb_base = val;
			break;
		case ATOM_WS_ATTRIBUTES:
			gctx->io_attr = val;
			break;
		case ATOM_WS_REGPTR:
			gctx->reg_block = val;
			break;
		default:
			ctx->ws[idx] = val;
		}
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		} else
			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
		DEBUG("FB[0x%02X]", idx);
		break;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("PLL[0x%02X]", idx);
		gctx->card->pll_write(gctx->card, idx, val);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		DEBUG("MC[0x%02X]", idx);
		gctx->card->mc_write(gctx->card, idx, val);
		return;
	}
	switch (align) {
	case ATOM_SRC_DWORD:
		DEBUG(".[31:0] <- 0x%08X\n", old_val);
		break;
	case ATOM_SRC_WORD0:
		DEBUG(".[15:0] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD8:
		DEBUG(".[23:8] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_WORD16:
		DEBUG(".[31:16] <- 0x%04X\n", old_val);
		break;
	case ATOM_SRC_BYTE0:
		DEBUG(".[7:0] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE8:
		DEBUG(".[15:8] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE16:
		DEBUG(".[23:16] <- 0x%02X\n", old_val);
		break;
	case ATOM_SRC_BYTE24:
		DEBUG(".[31:24] <- 0x%02X\n", old_val);
		break;
	}
}

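/*
 * Opcode handlers.  Each atom_op_* function implements one ATOM bytecode
 * instruction: it decodes its operands with the helpers above (advancing
 * *ptr past the encoded arguments) and, where applicable, writes the
 * result back through atom_put_dst() at the saved destination offset.
 */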
static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst += src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
{
	printk("ATOM BIOS beeped!\n");
}

static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	int r = 0;

	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
	if (r) {
		ctx->abort = true;
	}
}

static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}

static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = (dst == src);
	ctx->ctx->cs_above = (dst > src);
	SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
	       ctx->ctx->cs_above ? "GT" : "LE");
}

static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	unsigned count = U8((*ptr)++);

	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		udelay(count);
	else if (!drm_can_sleep())
		mdelay(count);
	else
		msleep(count);
}

static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	if (src != 0) {
		ctx->ctx->divmul[0] = dst / src;
		ctx->ctx->divmul[1] = dst % src;
	} else {
		ctx->ctx->divmul[0] = 0;
		ctx->ctx->divmul[1] = 0;
	}
}

static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}

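/*
 * Conditional and unconditional jumps.  When the same jump target keeps
 * being taken, the jiffies bookkeeping below detects a table that has been
 * spinning for more than five seconds and sets ctx->abort so the executor
 * can bail out instead of hanging.
 */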
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
	int execute = 0, target = U16(*ptr);
	unsigned long cjiffies;

	(*ptr) += 2;
	switch (arg) {
	case ATOM_COND_ABOVE:
		execute = ctx->ctx->cs_above;
		break;
	case ATOM_COND_ABOVEOREQUAL:
		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
		break;
	case ATOM_COND_ALWAYS:
		execute = 1;
		break;
	case ATOM_COND_BELOW:
		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
		break;
	case ATOM_COND_BELOWOREQUAL:
		execute = !ctx->ctx->cs_above;
		break;
	case ATOM_COND_EQUAL:
		execute = ctx->ctx->cs_equal;
		break;
	case ATOM_COND_NOTEQUAL:
		execute = !ctx->ctx->cs_equal;
		break;
	}
	if (arg != ATOM_COND_ALWAYS)
		SDEBUG(" taken: %s\n", execute ? "yes" : "no");
	SDEBUG(" target: 0x%04X\n", target);
	if (execute) {
		if (ctx->last_jump == (ctx->start + target)) {
			cjiffies = jiffies;
			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
				cjiffies -= ctx->last_jump_jiffies;
				if ((jiffies_to_msecs(cjiffies) > 5000)) {
					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
					ctx->abort = true;
				}
			} else {
				/* jiffies wrapped around; just wait a little longer */
				ctx->last_jump_jiffies = jiffies;
			}
		} else {
			ctx->last_jump = ctx->start + target;
			ctx->last_jump_jiffies = jiffies;
		}
		*ptr = ctx->start + target;
	}
}

static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, mask, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
	SDEBUG(" mask: 0x%08x", mask);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst &= mask;
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;

	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}

static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->divmul[0] = dst * src;
}

static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}

static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst |= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);

	SDEBUG("POST card output: 0x%02X\n", val);
}

static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);

	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}

static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);

	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}

static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;

	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}

static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}

static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;

	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	/* op needs the full dst value */
	dst = saved;
	shift = atom_get_src(ctx, attr, ptr);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	dst &= atom_arg_mask[dst_align];
	dst >>= atom_arg_shift[dst_align];
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst -= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;

	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			pr_info("Bad case\n");
			return;
		}
	(*ptr) += 2;
}

static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src;

	SDEBUG(" src1: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
	SDEBUG(" src2: ");
	src = atom_get_src(ctx, attr, ptr);
	ctx->ctx->cs_equal = ((dst & src) == 0);
	SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
}

static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t dst, src, saved;
	int dptr = *ptr;

	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	dst ^= src;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}

static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
{
	pr_info("unimplemented!\n");
}

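/*
 * Opcode dispatch table, indexed by the raw opcode byte.  Entry 0 is
 * unused; the ALU-style operations each appear six times in a row, once
 * per destination argument type (REG, PS, WS, FB, PLL, MC).
 */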
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{ NULL, 0 },
	{ atom_op_move, ATOM_ARG_REG },
	{ atom_op_move, ATOM_ARG_PS },
	{ atom_op_move, ATOM_ARG_WS },
	{ atom_op_move, ATOM_ARG_FB },
	{ atom_op_move, ATOM_ARG_PLL },
	{ atom_op_move, ATOM_ARG_MC },
	{ atom_op_and, ATOM_ARG_REG },
	{ atom_op_and, ATOM_ARG_PS },
	{ atom_op_and, ATOM_ARG_WS },
	{ atom_op_and, ATOM_ARG_FB },
	{ atom_op_and, ATOM_ARG_PLL },
	{ atom_op_and, ATOM_ARG_MC },
	{ atom_op_or, ATOM_ARG_REG },
	{ atom_op_or, ATOM_ARG_PS },
	{ atom_op_or, ATOM_ARG_WS },
	{ atom_op_or, ATOM_ARG_FB },
	{ atom_op_or, ATOM_ARG_PLL },
	{ atom_op_or, ATOM_ARG_MC },
	{ atom_op_shift_left, ATOM_ARG_REG },
	{ atom_op_shift_left, ATOM_ARG_PS },
	{ atom_op_shift_left, ATOM_ARG_WS },
	{ atom_op_shift_left, ATOM_ARG_FB },
	{ atom_op_shift_left, ATOM_ARG_PLL },
	{ atom_op_shift_left, ATOM_ARG_MC },
	{ atom_op_shift_right, ATOM_ARG_REG },
	{ atom_op_shift_right, ATOM_ARG_PS },
	{ atom_op_shift_right, ATOM_ARG_WS },
	{ atom_op_shift_right, ATOM_ARG_FB },
	{ atom_op_shift_right, ATOM_ARG_PLL },
	{ atom_op_shift_right, ATOM_ARG_MC },
	{ atom_op_mul, ATOM_ARG_REG },
	{ atom_op_mul, ATOM_ARG_PS },
	{ atom_op_mul, ATOM_ARG_WS },
	{ atom_op_mul, ATOM_ARG_FB },
	{ atom_op_mul, ATOM_ARG_PLL },
	{ atom_op_mul, ATOM_ARG_MC },
	{ atom_op_div, ATOM_ARG_REG },
	{ atom_op_div, ATOM_ARG_PS },
	{ atom_op_div, ATOM_ARG_WS },
	{ atom_op_div, ATOM_ARG_FB },
	{ atom_op_div, ATOM_ARG_PLL },
	{ atom_op_div, ATOM_ARG_MC },
	{ atom_op_add, ATOM_ARG_REG },
	{ atom_op_add, ATOM_ARG_PS },
	{ atom_op_add, ATOM_ARG_WS },
	{ atom_op_add, ATOM_ARG_FB },
	{ atom_op_add, ATOM_ARG_PLL },
	{ atom_op_add, ATOM_ARG_MC },
	{ atom_op_sub, ATOM_ARG_REG },
	{ atom_op_sub, ATOM_ARG_PS },
	{ atom_op_sub, ATOM_ARG_WS },
	{ atom_op_sub, ATOM_ARG_FB },
	{ atom_op_sub, ATOM_ARG_PLL },
	{ atom_op_sub, ATOM_ARG_MC },
	{ atom_op_setport, ATOM_PORT_ATI },
	{ atom_op_setport, ATOM_PORT_PCI },
	{ atom_op_setport, ATOM_PORT_SYSIO },
	{ atom_op_setregblock, 0 },
	{ atom_op_setfbbase, 0 },
	{ atom_op_compare, ATOM_ARG_REG },
	{ atom_op_compare, ATOM_ARG_PS },
	{ atom_op_compare, ATOM_ARG_WS },
	{ atom_op_compare, ATOM_ARG_FB },
	{ atom_op_compare, ATOM_ARG_PLL },
	{ atom_op_compare, ATOM_ARG_MC },
	{ atom_op_switch, 0 },
	{ atom_op_jump, ATOM_COND_ALWAYS },
	{ atom_op_jump, ATOM_COND_EQUAL },
	{ atom_op_jump, ATOM_COND_BELOW },
	{ atom_op_jump, ATOM_COND_ABOVE },
	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
	{ atom_op_jump, ATOM_COND_NOTEQUAL },
	{ atom_op_test, ATOM_ARG_REG },
	{ atom_op_test, ATOM_ARG_PS },
	{ atom_op_test, ATOM_ARG_WS },
	{ atom_op_test, ATOM_ARG_FB },
	{ atom_op_test, ATOM_ARG_PLL },
	{ atom_op_test, ATOM_ARG_MC },
	{ atom_op_delay, ATOM_UNIT_MILLISEC },
	{ atom_op_delay, ATOM_UNIT_MICROSEC },
	{ atom_op_calltable, 0 },
	{ atom_op_repeat, 0 },
	{ atom_op_clear, ATOM_ARG_REG },
	{ atom_op_clear, ATOM_ARG_PS },
	{ atom_op_clear, ATOM_ARG_WS },
	{ atom_op_clear, ATOM_ARG_FB },
	{ atom_op_clear, ATOM_ARG_PLL },
	{ atom_op_clear, ATOM_ARG_MC },
	{ atom_op_nop, 0 },
	{ atom_op_eot, 0 },
	{ atom_op_mask, ATOM_ARG_REG },
	{ atom_op_mask, ATOM_ARG_PS },
	{ atom_op_mask, ATOM_ARG_WS },
	{ atom_op_mask, ATOM_ARG_FB },
	{ atom_op_mask, ATOM_ARG_PLL },
	{ atom_op_mask, ATOM_ARG_MC },
	{ atom_op_postcard, 0 },
	{ atom_op_beep, 0 },
	{ atom_op_savereg, 0 },
	{ atom_op_restorereg, 0 },
	{ atom_op_setdatablock, 0 },
	{ atom_op_xor, ATOM_ARG_REG },
	{ atom_op_xor, ATOM_ARG_PS },
	{ atom_op_xor, ATOM_ARG_WS },
	{ atom_op_xor, ATOM_ARG_FB },
	{ atom_op_xor, ATOM_ARG_PLL },
	{ atom_op_xor, ATOM_ARG_MC },
	{ atom_op_shl, ATOM_ARG_REG },
	{ atom_op_shl, ATOM_ARG_PS },
	{ atom_op_shl, ATOM_ARG_WS },
	{ atom_op_shl, ATOM_ARG_FB },
	{ atom_op_shl, ATOM_ARG_PLL },
	{ atom_op_shl, ATOM_ARG_MC },
	{ atom_op_shr, ATOM_ARG_REG },
	{ atom_op_shr, ATOM_ARG_PS },
	{ atom_op_shr, ATOM_ARG_WS },
	{ atom_op_shr, ATOM_ARG_FB },
	{ atom_op_shr, ATOM_ARG_PLL },
	{ atom_op_shr, ATOM_ARG_MC },
	{ atom_op_debug, 0 },
};

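/*
 * Execute one command table: look up its offset in the master command
 * table, allocate the workspace it declares, then fetch and dispatch
 * opcodes through opcode_table[] until EOT, an invalid opcode, or an
 * abort raised by a handler (stuck jump or failed nested call).
 */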
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
{
	int base = CU16(ctx->cmd_table + 4 + 2 * index);
	int len, ws, ps, ptr;
	unsigned char op;
	atom_exec_context ectx;
	int ret = 0;

	if (!base)
		return -EINVAL;

	len = CU16(base + ATOM_CT_SIZE_PTR);
	ws = CU8(base + ATOM_CT_WS_PTR);
	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
	ptr = base + ATOM_CT_CODE_PTR;

	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);

	ectx.ctx = ctx;
	ectx.ps_shift = ps / 4;
	ectx.start = base;
	ectx.ps = params;
	ectx.abort = false;
	ectx.last_jump = 0;
	if (ws)
		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
	else
		ectx.ws = NULL;

	debug_depth++;
	while (1) {
		op = CU8(ptr++);
		if (op < ATOM_OP_NAMES_CNT)
			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
		else
			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
		if (ectx.abort) {
			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
				  base, len, ws, ps, ptr - 1);
			ret = -EINVAL;
			goto free;
		}

		if (op < ATOM_OP_CNT && op > 0)
			opcode_table[op].func(&ectx, &ptr,
					      opcode_table[op].arg);
		else
			break;

		if (op == ATOM_OP_EOT)
			break;
	}
	debug_depth--;
	SDEBUG("<<\n");

free:
	if (ws)
		kfree(ectx.ws);
	return ret;
}

int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->mutex);
	/* reset data block */
	ctx->data_block = 0;
	/* reset reg block */
	ctx->reg_block = 0;
	/* reset fb window */
	ctx->fb_base = 0;
	/* reset io mode */
	ctx->io_mode = ATOM_IO_MM;
	/* reset divmul */
	ctx->divmul[0] = 0;
	ctx->divmul[1] = 0;
	r = atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
	return r;
}

int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	int r;

	mutex_lock(&ctx->scratch_mutex);
	r = atom_execute_table_scratch_unlocked(ctx, index, params);
	mutex_unlock(&ctx->scratch_mutex);
	return r;
}

static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };

static void atom_index_iio(struct atom_context *ctx, int base)
{
	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
	if (!ctx->iio)
		return;
	while (CU8(base) == ATOM_IIO_START) {
		ctx->iio[CU8(base + 1)] = base + 2;
		base += 2;
		while (CU8(base) != ATOM_IIO_END)
			base += atom_iio_len[CU8(base)];
		base += 3;
	}
}

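/*
 * Build an atom_context from a raw BIOS image: verify the BIOS, ATI and
 * ATOM ROM signatures, record the master command and data table offsets,
 * index the indirect IO programs, and log the (not necessarily
 * NUL-terminated) BIOS name string.
 */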
struct atom_context *atom_parse(struct card_info *card, void *bios)
{
	int base;
	struct atom_context *ctx =
	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
	char *str;
	char name[512];
	int i;

	if (!ctx)
		return NULL;

	ctx->card = card;
	ctx->bios = bios;

	if (CU16(0) != ATOM_BIOS_MAGIC) {
		pr_info("Invalid BIOS magic\n");
		kfree(ctx);
		return NULL;
	}
	if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
		    strlen(ATOM_ATI_MAGIC))) {
		pr_info("Invalid ATI magic\n");
		kfree(ctx);
		return NULL;
	}

	base = CU16(ATOM_ROM_TABLE_PTR);
	if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
		    strlen(ATOM_ROM_MAGIC))) {
		pr_info("Invalid ATOM magic\n");
		kfree(ctx);
		return NULL;
	}

	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
	if (!ctx->iio) {
		atom_destroy(ctx);
		return NULL;
	}

	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
	while (*str && ((*str == '\n') || (*str == '\r')))
		str++;
	/* name string isn't always 0 terminated */
	for (i = 0; i < 511; i++) {
		name[i] = str[i];
		if (name[i] < '.' || name[i] > 'z') {
			name[i] = 0;
			break;
		}
	}
	pr_info("ATOM BIOS: %s\n", name);

	return ctx;
}

int atom_asic_init(struct atom_context *ctx)
{
	struct radeon_device *rdev = ctx->card->dev->dev_private;
	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
	uint32_t ps[16];
	int ret;

	memset(ps, 0, 64);

	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
	if (!ps[0] || !ps[1])
		return 1;

	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
		return 1;
	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
	if (ret)
		return ret;

	memset(ps, 0, 64);

	if (rdev->family < CHIP_R600) {
		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
	}

	return ret;
}

void atom_destroy(struct atom_context *ctx)
{
	kfree(ctx->iio);
	kfree(ctx);
}

bool atom_parse_data_header(struct atom_context *ctx, int index,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint16_t *data_start)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->data_table + offset);
	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);

	if (!mdt[index])
		return false;

	if (size)
		*size = CU16(idx);
	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	*data_start = idx;
	return true;
}

bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
			   uint8_t *crev)
{
	int offset = index * 2 + 4;
	int idx = CU16(ctx->cmd_table + offset);
	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);

	if (!mct[index])
		return false;

	if (frev)
		*frev = CU8(idx + 2);
	if (crev)
		*crev = CU8(idx + 3);
	return true;
}

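/*
 * Allocate the scratch buffer backing ATOM_ARG_FB reads and writes.  The
 * size is taken from the VRAM_UsageByFirmware data table when present and
 * falls back to 20 KB otherwise.
 */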
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t data_offset;
	int usage_bytes = 0;
	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;

	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

		DRM_DEBUG("atom firmware requested %08x %dkb\n",
			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}