// SPDX-License-Identifier: GPL-2.0-only

#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
#define DRIVER_DATE "20220501"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0

#define PCI_VENDOR_ID_ATI_R520 0x7100
#define PCI_VENDOR_ID_ATI_R600 0x9400

#define OFDRM_GAMMA_LUT_SIZE 256

/* Definitions used by the Avivo palette */
#define AVIVO_DC_LUT_RW_SELECT 0x6480
#define AVIVO_DC_LUT_RW_MODE 0x6484
#define AVIVO_DC_LUT_RW_INDEX 0x6488
#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
#define AVIVO_DC_LUT_PWL_DATA 0x6490
#define AVIVO_DC_LUT_30_COLOR 0x6494
#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
#define AVIVO_DC_LUT_AUTOFILL 0x64a0
#define AVIVO_DC_LUTA_CONTROL 0x64c0
#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
#define AVIVO_DC_LUTB_CONTROL 0x6cc0
#define AVIVO_DC_LUTB_BLACK_OFFSET_BLUE 0x6cc4
#define AVIVO_DC_LUTB_BLACK_OFFSET_GREEN 0x6cc8
#define AVIVO_DC_LUTB_BLACK_OFFSET_RED 0x6ccc
#define AVIVO_DC_LUTB_WHITE_OFFSET_BLUE 0x6cd0
#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
enum ofdrm_model {
	OFDRM_MODEL_UNKNOWN,
	OFDRM_MODEL_MACH64,	/* ATI Mach64 */
	OFDRM_MODEL_RAGE128,	/* ATI Rage128 */
	OFDRM_MODEL_RAGE_M3A,	/* ATI Rage Mobility M3 Head A */
	OFDRM_MODEL_RAGE_M3B,	/* ATI Rage Mobility M3 Head B */
	OFDRM_MODEL_RADEON,	/* ATI Radeon */
	OFDRM_MODEL_GXT2000,	/* IBM GXT2000 */
	OFDRM_MODEL_AVIVO,	/* ATI R5xx */
	OFDRM_MODEL_QEMU,	/* QEMU VGA */
};

/*
 * Helpers for display nodes
 */
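
/*
 * A typical Open Firmware display node describes the firmware-initialized
 * framebuffer with individual properties, for example (illustrative values
 * only):
 *
 *	display {
 *		compatible = "display";
 *		width = <1024>;
 *		height = <768>;
 *		depth = <32>;
 *		linebytes = <4096>;
 *		address = <0x9c000000>;
 *	};
 *
 * The helpers below read and validate these properties.
 */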
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
	if (value > INT_MAX) {
		drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
		return -EINVAL;
	}
	return (int)value;
}

static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
	if (!value) {
		drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
		return -EINVAL;
	}
	return display_get_validated_int(dev, name, value);
}
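
/*
 * Map the firmware-reported color depth to a DRM format. Depths of 15 and
 * 16 both use 16-bit pixels; a depth of 32 maps to an XRGB8888 layout. The
 * format is adjusted afterwards for big-endian scanout buffers.
 */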
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
								  u32 depth, bool big_endian)
{
	const struct drm_format_info *info;
	u32 format;

	switch (depth) {
	case 8:
		format = drm_mode_legacy_fb_format(8, 8);
		break;
	case 15:
	case 16:
		format = drm_mode_legacy_fb_format(16, depth);
		break;
	case 32:
		format = drm_mode_legacy_fb_format(32, 24);
		break;
	default:
		drm_err(dev, "unsupported framebuffer depth %u\n", depth);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * DRM formats assume little-endian byte order. Update the format
	 * if the scanout buffer uses big-endian ordering.
	 */
	if (big_endian) {
		switch (format) {
		case DRM_FORMAT_XRGB8888:
			format = DRM_FORMAT_BGRX8888;
			break;
		case DRM_FORMAT_ARGB8888:
			format = DRM_FORMAT_BGRA8888;
			break;
		case DRM_FORMAT_RGB565:
			format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN;
			break;
		case DRM_FORMAT_XRGB1555:
			format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN;
			break;
		default:
			break;
		}
	}

	info = drm_format_info(format);
	if (!info) {
		drm_err(dev, "cannot find framebuffer format for depth %u\n", depth);
		return ERR_PTR(-EINVAL);
	}

	return info;
}
static int display_read_u32_of(struct drm_device *dev, struct device_node *of_node,
			       const char *name, u32 *value)
{
	int ret = of_property_read_u32(of_node, name, value);

	if (ret)
		drm_err(dev, "cannot parse framebuffer %s: error %d\n", name, ret);

	return ret;
}

static bool display_get_big_endian_of(struct drm_device *dev, struct device_node *of_node)
{
	bool big_endian;

#ifdef __BIG_ENDIAN
	big_endian = !of_property_read_bool(of_node, "little-endian");
#else
	big_endian = of_property_read_bool(of_node, "big-endian");
#endif

	return big_endian;
}

static int display_get_width_of(struct drm_device *dev, struct device_node *of_node)
{
	u32 width;
	int ret = display_read_u32_of(dev, of_node, "width", &width);

	if (ret)
		return ret;

	return display_get_validated_int0(dev, "width", width);
}

static int display_get_height_of(struct drm_device *dev, struct device_node *of_node)
{
	u32 height;
	int ret = display_read_u32_of(dev, of_node, "height", &height);

	if (ret)
		return ret;

	return display_get_validated_int0(dev, "height", height);
}

static int display_get_depth_of(struct drm_device *dev, struct device_node *of_node)
{
	u32 depth;
	int ret = display_read_u32_of(dev, of_node, "depth", &depth);

	if (ret)
		return ret;

	return display_get_validated_int0(dev, "depth", depth);
}

static int display_get_linebytes_of(struct drm_device *dev, struct device_node *of_node)
{
	u32 linebytes;
	int ret = display_read_u32_of(dev, of_node, "linebytes", &linebytes);

	if (ret)
		return ret;

	return display_get_validated_int(dev, "linebytes", linebytes);
}

static u64 display_get_address_of(struct drm_device *dev, struct device_node *of_node)
{
	u32 address;
	int ret;

	/*
	 * Not all devices provide an address property, it's not
	 * a bug if this fails. The driver will try to find the
	 * framebuffer base address from the device's memory regions.
	 */
	ret = of_property_read_u32(of_node, "address", &address);
	if (ret)
		return OF_BAD_ADDR;

	return address;
}
static bool is_avivo(u32 vendor, u32 device)
{
	/* This will match most R5xx */
	return (vendor == PCI_VENDOR_ID_ATI) &&
	       ((device >= PCI_VENDOR_ID_ATI_R520 && device < 0x7800) ||
		(device >= PCI_VENDOR_ID_ATI_R600));
}
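
/*
 * Identify the graphics hardware from the display node's name and compatible
 * strings. The detected model selects the colormap callbacks further below.
 */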
static enum ofdrm_model display_get_model_of(struct drm_device *dev, struct device_node *of_node)
{
	enum ofdrm_model model = OFDRM_MODEL_UNKNOWN;

	if (of_node_name_prefix(of_node, "ATY,Rage128")) {
		model = OFDRM_MODEL_RAGE128;
	} else if (of_node_name_prefix(of_node, "ATY,RageM3pA") ||
		   of_node_name_prefix(of_node, "ATY,RageM3p12A")) {
		model = OFDRM_MODEL_RAGE_M3A;
	} else if (of_node_name_prefix(of_node, "ATY,RageM3pB")) {
		model = OFDRM_MODEL_RAGE_M3B;
	} else if (of_node_name_prefix(of_node, "ATY,Rage6")) {
		model = OFDRM_MODEL_RADEON;
	} else if (of_node_name_prefix(of_node, "ATY,")) {
		return OFDRM_MODEL_MACH64;
	} else if (of_device_is_compatible(of_node, "pci1014,b7") ||
		   of_device_is_compatible(of_node, "pci1014,21c")) {
		model = OFDRM_MODEL_GXT2000;
	} else if (of_node_name_prefix(of_node, "vga,Display-")) {
		struct device_node *of_parent;
		const __be32 *vendor_p, *device_p;

		/* Look for AVIVO initialized by SLOF */
		of_parent = of_get_parent(of_node);
		vendor_p = of_get_property(of_parent, "vendor-id", NULL);
		device_p = of_get_property(of_parent, "device-id", NULL);
		if (vendor_p && device_p) {
			u32 vendor = be32_to_cpup(vendor_p);
			u32 device = be32_to_cpup(device_p);

			if (is_avivo(vendor, device))
				model = OFDRM_MODEL_AVIVO;
		}
		of_node_put(of_parent);
	} else if (of_device_is_compatible(of_node, "qemu,std-vga")) {
		model = OFDRM_MODEL_QEMU;
	}

	return model;
}
/*
 * Open Firmware display device
 */

struct ofdrm_device;
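
/*
 * Per-model callbacks for mapping the palette registers and for writing a
 * single palette entry.
 */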
struct ofdrm_device_funcs {
	void __iomem *(*cmap_ioremap)(struct ofdrm_device *odev,
				      struct device_node *of_node,
				      u64 fb_base);
	void (*cmap_write)(struct ofdrm_device *odev, unsigned char index,
			   unsigned char r, unsigned char g, unsigned char b);
};
struct ofdrm_device {
	struct drm_device dev;
	struct platform_device *pdev;

	const struct ofdrm_device_funcs *funcs;

	/* firmware-buffer settings */
	struct iosys_map screen_base;
	struct drm_display_mode mode;
	const struct drm_format_info *format;
	unsigned int pitch;

	/* colormap */
	void __iomem *cmap_base;

	/* modesetting */
	uint32_t formats[8];
	struct drm_plane primary_plane;
	struct drm_crtc crtc;
	struct drm_encoder encoder;
	struct drm_connector connector;
};

static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
	return container_of(dev, struct ofdrm_device, dev);
}

/*
 * Hardware
 */

#if defined(CONFIG_PCI)
static struct pci_dev *display_get_pci_dev_of(struct drm_device *dev, struct device_node *of_node)
{
	const __be32 *vendor_p, *device_p;
	u32 vendor, device;
	struct pci_dev *pcidev;

	vendor_p = of_get_property(of_node, "vendor-id", NULL);
	if (!vendor_p)
		return ERR_PTR(-ENODEV);
	vendor = be32_to_cpup(vendor_p);

	device_p = of_get_property(of_node, "device-id", NULL);
	if (!device_p)
		return ERR_PTR(-ENODEV);
	device = be32_to_cpup(device_p);

	pcidev = pci_get_device(vendor, device, NULL);
	if (!pcidev)
		return ERR_PTR(-ENODEV);

	return pcidev;
}

static void ofdrm_pci_release(void *data)
{
	struct pci_dev *pcidev = data;

	pci_disable_device(pcidev);
}

static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
	struct drm_device *dev = &odev->dev;
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct device_node *of_node = pdev->dev.of_node;
	struct pci_dev *pcidev;
	int ret;

	/*
	 * Never use pcim_ or other managed helpers on the returned PCI
	 * device. Otherwise, probing the native driver will fail for
	 * resource conflicts. PCI-device management has to be tied to
	 * the lifetime of the platform device until the native driver
	 * takes over.
	 */
	pcidev = display_get_pci_dev_of(dev, of_node);
	if (IS_ERR(pcidev))
		return 0; /* no PCI device found; ignore the error */

	ret = pci_enable_device(pcidev);
	if (ret) {
		drm_err(dev, "pci_enable_device(%s) failed: %d\n",
			dev_name(&pcidev->dev), ret);
		return ret;
	}
	ret = devm_add_action_or_reset(&pdev->dev, ofdrm_pci_release, pcidev);
	if (ret)
		return ret;

	return 0;
}
#else
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
	return 0;
}
#endif
/*
 * OF display settings
 */

static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
					       struct resource *fb_res)
{
	struct platform_device *pdev = to_platform_device(odev->dev.dev);
	struct resource *res, *max_res = NULL;
	u32 i;

	for (i = 0; pdev->num_resources; ++i) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break; /* all resources processed */
		if (resource_size(res) < resource_size(fb_res))
			continue; /* resource too small */
		if (fb_res->start && resource_contains(res, fb_res))
			return res; /* resource contains framebuffer */
		if (!max_res || resource_size(res) > resource_size(max_res))
			max_res = res; /* store largest resource as fallback */
	}

	return max_res;
}

/*
 * Colormap / Palette
 */
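
/*
 * Translate and map the palette-register range described by the device
 * tree. The registers may be given either as a PCI address or as a plain
 * OF address; both forms are tried.
 */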
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
					 int bar_no, unsigned long offset, unsigned long size)
{
	struct drm_device *dev = &odev->dev;
	const __be32 *addr_p;
	u64 max_size, address;
	unsigned int flags;
	void __iomem *mem;

	addr_p = of_get_pci_address(of_node, bar_no, &max_size, &flags);
	if (!addr_p)
		addr_p = of_get_address(of_node, bar_no, &max_size, &flags);
	if (!addr_p)
		return IOMEM_ERR_PTR(-ENODEV);
	if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
		return IOMEM_ERR_PTR(-ENODEV);
	if ((offset + size) >= max_size)
		return IOMEM_ERR_PTR(-ENODEV);

	address = of_translate_address(of_node, addr_p);
	if (address == OF_BAD_ADDR)
		return IOMEM_ERR_PTR(-ENODEV);

	mem = devm_ioremap(dev->dev, address + offset, size);
	if (!mem)
		return IOMEM_ERR_PTR(-ENOMEM);

	return mem;
}
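
/*
 * Mach64: derive the palette-register location from the 16 MiB-aligned
 * framebuffer base plus a fixed offset, rather than from the device tree.
 */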
static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
					       struct device_node *of_node,
					       u64 fb_base)
{
	struct drm_device *dev = &odev->dev;
	u64 address;
	void __iomem *cmap_base;

	address = fb_base & 0xff000000ul;
	address += 0x7ff000;

	cmap_base = devm_ioremap(dev->dev, address, 0x1000);
	if (!cmap_base)
		return IOMEM_ERR_PTR(-ENOMEM);

	return cmap_base;
}

static void ofdrm_mach64_cmap_write(struct ofdrm_device *odev, unsigned char index,
				    unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *addr = odev->cmap_base + 0xcc0;
	void __iomem *data = odev->cmap_base + 0xcc0 + 1;

	writeb(index, addr);
	writeb(r, data);
	writeb(g, data);
	writeb(b, data);
}

static void __iomem *ofdrm_rage128_cmap_ioremap(struct ofdrm_device *odev,
						struct device_node *of_node,
						u64 fb_base)
{
	return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}

static void ofdrm_rage128_cmap_write(struct ofdrm_device *odev, unsigned char index,
				     unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *addr = odev->cmap_base + 0xb0;
	void __iomem *data = odev->cmap_base + 0xb4;
	u32 color = (r << 16) | (g << 8) | b;

	writeb(index, addr);
	writel(color, data);
}

static void __iomem *ofdrm_rage_m3a_cmap_ioremap(struct ofdrm_device *odev,
						 struct device_node *of_node,
						 u64 fb_base)
{
	return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}

static void ofdrm_rage_m3a_cmap_write(struct ofdrm_device *odev, unsigned char index,
				      unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *dac_ctl = odev->cmap_base + 0x58;
	void __iomem *addr = odev->cmap_base + 0xb0;
	void __iomem *data = odev->cmap_base + 0xb4;
	u32 color = (r << 16) | (g << 8) | b;
	u32 val;

	/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
	val = readl(dac_ctl);
	val &= ~0x20;
	writel(val, dac_ctl);

	/* Set color at palette index */
	writeb(index, addr);
	writel(color, data);
}

static void __iomem *ofdrm_rage_m3b_cmap_ioremap(struct ofdrm_device *odev,
						 struct device_node *of_node,
						 u64 fb_base)
{
	return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}

static void ofdrm_rage_m3b_cmap_write(struct ofdrm_device *odev, unsigned char index,
				      unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *dac_ctl = odev->cmap_base + 0x58;
	void __iomem *addr = odev->cmap_base + 0xb0;
	void __iomem *data = odev->cmap_base + 0xb4;
	u32 color = (r << 16) | (g << 8) | b;
	u32 val;

	/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
	val = readl(dac_ctl);
	val |= 0x20;
	writel(val, dac_ctl);

	/* Set color at palette index */
	writeb(index, addr);
	writel(color, data);
}

static void __iomem *ofdrm_radeon_cmap_ioremap(struct ofdrm_device *odev,
					       struct device_node *of_node,
					       u64 fb_base)
{
	return get_cmap_address_of(odev, of_node, 1, 0, 0x1fff);
}

static void __iomem *ofdrm_gxt2000_cmap_ioremap(struct ofdrm_device *odev,
						struct device_node *of_node,
						u64 fb_base)
{
	return get_cmap_address_of(odev, of_node, 0, 0x6000, 0x1000);
}
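
/*
 * GXT2000: each palette entry is a packed 8:8:8 word, indexed directly in
 * the mapped register block.
 */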
static void ofdrm_gxt2000_cmap_write(struct ofdrm_device *odev, unsigned char index,
				     unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *data = ((unsigned int __iomem *)odev->cmap_base) + index;
	u32 color = (r << 16) | (g << 8) | b;

	writel(color, data);
}

static void __iomem *ofdrm_avivo_cmap_ioremap(struct ofdrm_device *odev,
					      struct device_node *of_node,
					      u64 fb_base)
{
	struct device_node *of_parent;
	void __iomem *cmap_base;

	of_parent = of_get_parent(of_node);
	cmap_base = get_cmap_address_of(odev, of_parent, 0, 0, 0x10000);
	of_node_put(of_parent);

	return cmap_base;
}
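
/*
 * DC_LUT_30_COLOR holds 10 bits per component; the 8-bit values are shifted
 * into the upper bits of each field.
 */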
static void ofdrm_avivo_cmap_write(struct ofdrm_device *odev, unsigned char index,
				   unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *lutsel = odev->cmap_base + AVIVO_DC_LUT_RW_SELECT;
	void __iomem *addr = odev->cmap_base + AVIVO_DC_LUT_RW_INDEX;
	void __iomem *data = odev->cmap_base + AVIVO_DC_LUT_30_COLOR;
	u32 color = (r << 22) | (g << 12) | (b << 2);

	/* Write to both LUTs for now */
	writel(1, lutsel);
	writeb(index, addr);
	writel(color, data);

	writel(0, lutsel);
	writeb(index, addr);
	writel(color, data);
}

static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
					     struct device_node *of_node,
					     u64 fb_base)
{
	static const __be32 io_of_addr[3] = {
		cpu_to_be32(0x01000000),
		cpu_to_be32(0x00),
		cpu_to_be32(0x00),
	};

	struct drm_device *dev = &odev->dev;
	u64 address;
	void __iomem *cmap_base;

	address = of_translate_address(of_node, io_of_addr);
	if (address == OF_BAD_ADDR)
		return IOMEM_ERR_PTR(-ENODEV);

	cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2);
	if (!cmap_base)
		return IOMEM_ERR_PTR(-ENOMEM);

	return cmap_base;
}
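
/*
 * The mapping above covers the VGA DAC write-index port (0x3c8) and its data
 * port (0x3c9); three consecutive data writes set R, G and B for one entry.
 */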
static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index,
				  unsigned char r, unsigned char g, unsigned char b)
{
	void __iomem *addr = odev->cmap_base;
	void __iomem *data = odev->cmap_base + 1;

	writeb(index, addr);
	writeb(r, data);
	writeb(g, data);
	writeb(b, data);
}
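
/*
 * For 16-bit formats only 32 palette entries are used for red and blue and
 * 64 for green. The expressions i * 8 + i / 4 and i * 4 + i / 16 spread
 * those entries evenly over the full 0..255 output range.
 */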
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
					  const struct drm_format_info *format)
{
	struct drm_device *dev = &odev->dev;
	int i;

	switch (format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
		/* Use better interpolation, to take 32 values from 0 to 255 */
		for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
			unsigned char r = i * 8 + i / 4;
			unsigned char g = i * 4 + i / 16;
			unsigned char b = i * 8 + i / 4;

			odev->funcs->cmap_write(odev, i, r, g, b);
		}
		/* Green has one more bit, so add padding with 0 for red and blue. */
		for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
			unsigned char r = 0;
			unsigned char g = i * 4 + i / 16;
			unsigned char b = 0;

			odev->funcs->cmap_write(odev, i, r, g, b);
		}
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_BGRX8888:
		for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
			odev->funcs->cmap_write(odev, i, i, i, i);
		break;
	default:
		drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
			      &format->format);
		break;
	}
}

static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
				   const struct drm_format_info *format,
				   struct drm_color_lut *lut)
{
	struct drm_device *dev = &odev->dev;
	int i;

	switch (format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
		/* Use better interpolation, to take 32 values from lut[0] to lut[255] */
		for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
			unsigned char r = lut[i * 8 + i / 4].red >> 8;
			unsigned char g = lut[i * 4 + i / 16].green >> 8;
			unsigned char b = lut[i * 8 + i / 4].blue >> 8;

			odev->funcs->cmap_write(odev, i, r, g, b);
		}
		/* Green has one more bit, so add padding with 0 for red and blue. */
		for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
			unsigned char r = 0;
			unsigned char g = lut[i * 4 + i / 16].green >> 8;
			unsigned char b = 0;

			odev->funcs->cmap_write(odev, i, r, g, b);
		}
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_BGRX8888:
		for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
			unsigned char r = lut[i].red >> 8;
			unsigned char g = lut[i].green >> 8;
			unsigned char b = lut[i].blue >> 8;

			odev->funcs->cmap_write(odev, i, r, g, b);
		}
		break;
	default:
		drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
			      &format->format);
		break;
	}
}
/*
 * Modesetting
 */

struct ofdrm_crtc_state {
	struct drm_crtc_state base;

	/* Primary-plane format; required for color mgmt. */
	const struct drm_format_info *format;
};

static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
{
	return container_of(base, struct ofdrm_crtc_state, base);
}

static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
{
	__drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
	kfree(ofdrm_crtc_state);
}

static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
						   struct drm_atomic_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
	struct drm_shadow_plane_state *new_shadow_plane_state =
		to_drm_shadow_plane_state(new_plane_state);
	struct drm_framebuffer *new_fb = new_plane_state->fb;
	struct drm_crtc *new_crtc = new_plane_state->crtc;
	struct drm_crtc_state *new_crtc_state = NULL;
	struct ofdrm_crtc_state *new_ofdrm_crtc_state;
	int ret;

	if (new_crtc)
		new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, false);
	if (ret)
		return ret;
	else if (!new_plane_state->visible)
		return 0;

	if (new_fb->format != odev->format) {
		void *buf;

		/* format conversion necessary; reserve buffer */
		buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
						    odev->pitch, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);

	new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
	new_ofdrm_crtc_state->format = new_fb->format;

	return 0;
}
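
/*
 * Blit the damaged parts of the shadow-buffered plane into the firmware
 * framebuffer, converting to the firmware's scanout format where necessary.
 */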
static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
						     struct drm_atomic_state *state)
{
	struct drm_device *dev = plane->dev;
	struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;
	unsigned int dst_pitch = odev->pitch;
	const struct drm_format_info *dst_format = odev->format;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;
	int ret, idx;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return;

	if (!drm_dev_enter(dev, &idx))
		goto out_drm_gem_fb_end_cpu_access;

	drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		struct iosys_map dst = odev->screen_base;
		struct drm_rect dst_clip = plane_state->dst;

		if (!drm_rect_intersect(&dst_clip, &damage))
			continue;

		iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
		drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
			    &damage, &shadow_plane_state->fmtcnv_state);
	}

	drm_dev_exit(idx);
out_drm_gem_fb_end_cpu_access:
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}

static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
						      struct drm_atomic_state *state)
{
	struct drm_device *dev = plane->dev;
	struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
	struct iosys_map dst = odev->screen_base;
	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
	void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
	unsigned int dst_pitch = odev->pitch;
	const struct drm_format_info *dst_format = odev->format;
	struct drm_rect dst_clip;
	unsigned long lines, linepixels, i;
	int idx;

	drm_rect_init(&dst_clip,
		      plane_state->src_x >> 16, plane_state->src_y >> 16,
		      plane_state->src_w >> 16, plane_state->src_h >> 16);

	lines = drm_rect_height(&dst_clip);
	linepixels = drm_rect_width(&dst_clip);

	if (!drm_dev_enter(dev, &idx))
		return;

	/* Clear buffer to black if disabled */
	dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
	for (i = 0; i < lines; ++i) {
		memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
		dst_vmap += dst_pitch;
	}

	drm_dev_exit(idx);
}

static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
	.atomic_check = ofdrm_primary_plane_helper_atomic_check,
	.atomic_update = ofdrm_primary_plane_helper_atomic_update,
	.atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
};

static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,
};

static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
							 const struct drm_display_mode *mode)
{
	struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);

	return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
}

static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
					  struct drm_atomic_state *new_state)
{
	static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);

	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	int ret;

	if (!new_crtc_state->enable)
		return 0;

	ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
	if (ret)
		return ret;

	if (new_crtc_state->color_mgmt_changed) {
		struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;

		if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
			drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
			return -EINVAL;
		}
	}

	return 0;
}

static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);

	if (crtc_state->enable && crtc_state->color_mgmt_changed) {
		const struct drm_format_info *format = ofdrm_crtc_state->format;

		if (crtc_state->gamma_lut)
			ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
		else
			ofdrm_device_set_gamma_linear(odev, format);
	}
}

/*
 * The CRTC is always enabled. Screen updates are performed by
 * the primary plane's atomic_update function. Disabling clears
 * the screen in the primary plane's atomic_disable function.
 */
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
	.mode_valid = ofdrm_crtc_helper_mode_valid,
	.atomic_check = ofdrm_crtc_helper_atomic_check,
	.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};

static void ofdrm_crtc_reset(struct drm_crtc *crtc)
{
	struct ofdrm_crtc_state *ofdrm_crtc_state =
		kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);

	if (crtc->state)
		ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));

	if (ofdrm_crtc_state)
		__drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct ofdrm_crtc_state *new_ofdrm_crtc_state;
	struct ofdrm_crtc_state *ofdrm_crtc_state;

	if (drm_WARN_ON(dev, !crtc_state))
		return NULL;

	new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
	if (!new_ofdrm_crtc_state)
		return NULL;

	ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);

	__drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
	new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;

	return &new_ofdrm_crtc_state->base;
}

static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					    struct drm_crtc_state *crtc_state)
{
	ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
}

static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
	.reset = ofdrm_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
	.atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};

static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
{
	struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);

	return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
}

static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
	.get_modes = ofdrm_connector_helper_get_modes,
};

static const struct drm_connector_funcs ofdrm_connector_funcs = {
	.reset = drm_atomic_helper_connector_reset,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/*
 * Init / Cleanup
 */

static const struct ofdrm_device_funcs ofdrm_unknown_device_funcs = {
};

static const struct ofdrm_device_funcs ofdrm_mach64_device_funcs = {
	.cmap_ioremap = ofdrm_mach64_cmap_ioremap,
	.cmap_write = ofdrm_mach64_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_rage128_device_funcs = {
	.cmap_ioremap = ofdrm_rage128_cmap_ioremap,
	.cmap_write = ofdrm_rage128_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_rage_m3a_device_funcs = {
	.cmap_ioremap = ofdrm_rage_m3a_cmap_ioremap,
	.cmap_write = ofdrm_rage_m3a_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_rage_m3b_device_funcs = {
	.cmap_ioremap = ofdrm_rage_m3b_cmap_ioremap,
	.cmap_write = ofdrm_rage_m3b_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_radeon_device_funcs = {
	.cmap_ioremap = ofdrm_radeon_cmap_ioremap,
	.cmap_write = ofdrm_rage128_cmap_write, /* same as Rage128 */
};

static const struct ofdrm_device_funcs ofdrm_gxt2000_device_funcs = {
	.cmap_ioremap = ofdrm_gxt2000_cmap_ioremap,
	.cmap_write = ofdrm_gxt2000_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_avivo_device_funcs = {
	.cmap_ioremap = ofdrm_avivo_cmap_ioremap,
	.cmap_write = ofdrm_avivo_cmap_write,
};

static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
	.cmap_ioremap = ofdrm_qemu_cmap_ioremap,
	.cmap_write = ofdrm_qemu_cmap_write,
};

static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
{
	/*
	 * Assume a monitor resolution of 96 dpi to
	 * get a somewhat reasonable screen size.
	 */
	const struct drm_display_mode mode = {
		DRM_MODE_INIT(60, width, height,
			      DRM_MODE_RES_MM(width, 96ul),
			      DRM_MODE_RES_MM(height, 96ul))
	};

	return mode;
}
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
						struct platform_device *pdev)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct ofdrm_device *odev;
	struct drm_device *dev;
	enum ofdrm_model model;
	bool big_endian;
	int width, height, depth, linebytes;
	const struct drm_format_info *format;
	u64 address;
	resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
	struct resource *res, *mem;
	void __iomem *screen_base;
	struct drm_plane *primary_plane;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned long max_width, max_height;
	size_t nformats;
	int ret;

	odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
	if (IS_ERR(odev))
		return ERR_CAST(odev);
	dev = &odev->dev;
	platform_set_drvdata(pdev, dev);

	ret = ofdrm_device_init_pci(odev);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * OF display-node settings
	 */

	model = display_get_model_of(dev, of_node);
	drm_dbg(dev, "detected model %d\n", model);

	switch (model) {
	case OFDRM_MODEL_UNKNOWN:
		odev->funcs = &ofdrm_unknown_device_funcs;
		break;
	case OFDRM_MODEL_MACH64:
		odev->funcs = &ofdrm_mach64_device_funcs;
		break;
	case OFDRM_MODEL_RAGE128:
		odev->funcs = &ofdrm_rage128_device_funcs;
		break;
	case OFDRM_MODEL_RAGE_M3A:
		odev->funcs = &ofdrm_rage_m3a_device_funcs;
		break;
	case OFDRM_MODEL_RAGE_M3B:
		odev->funcs = &ofdrm_rage_m3b_device_funcs;
		break;
	case OFDRM_MODEL_RADEON:
		odev->funcs = &ofdrm_radeon_device_funcs;
		break;
	case OFDRM_MODEL_GXT2000:
		odev->funcs = &ofdrm_gxt2000_device_funcs;
		break;
	case OFDRM_MODEL_AVIVO:
		odev->funcs = &ofdrm_avivo_device_funcs;
		break;
	case OFDRM_MODEL_QEMU:
		odev->funcs = &ofdrm_qemu_device_funcs;
		break;
	}

	big_endian = display_get_big_endian_of(dev, of_node);

	width = display_get_width_of(dev, of_node);
	if (width < 0)
		return ERR_PTR(width);
	height = display_get_height_of(dev, of_node);
	if (height < 0)
		return ERR_PTR(height);
	depth = display_get_depth_of(dev, of_node);
	if (depth < 0)
		return ERR_PTR(depth);
	linebytes = display_get_linebytes_of(dev, of_node);
	if (linebytes < 0)
		return ERR_PTR(linebytes);

	format = display_get_validated_format(dev, depth, big_endian);
	if (IS_ERR(format))
		return ERR_CAST(format);
	if (!linebytes) {
		linebytes = drm_format_info_min_pitch(format, 0, width);
		if (drm_WARN_ON(dev, !linebytes))
			return ERR_PTR(-EINVAL);
	}

	fb_size = linebytes * height;

	/*
	 * Try to figure out the address of the framebuffer. Unfortunately, Open
	 * Firmware doesn't provide a standard way to do so. All we can do is a
	 * dodgy heuristic that happens to work in practice.
	 *
	 * On most machines, the "address" property contains what we need, though
	 * not on Matrox cards found in IBM machines. What appears to give good
	 * results is to go through the PCI ranges and pick one that encloses the
	 * "address" property. If none match, we pick the largest.
	 */
	address = display_get_address_of(dev, of_node);
	if (address != OF_BAD_ADDR) {
		struct resource fb_res = DEFINE_RES_MEM(address, fb_size);

		res = ofdrm_find_fb_resource(odev, &fb_res);
		if (!res)
			return ERR_PTR(-EINVAL);
		if (resource_contains(res, &fb_res))
			fb_base = address;
		else
			fb_base = res->start;
	} else {
		struct resource fb_res = DEFINE_RES_MEM(0u, fb_size);

		res = ofdrm_find_fb_resource(odev, &fb_res);
		if (!res)
			return ERR_PTR(-EINVAL);
		fb_base = res->start;
	}

	/*
	 * I/O resources
	 */
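
	/* Expand the framebuffer range to whole pages before reserving and mapping it. */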
	fb_pgbase = round_down(fb_base, PAGE_SIZE);
	fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE);

	ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize);
	if (ret) {
		drm_err(dev, "could not acquire memory range %pr: error %d\n", res, ret);
		return ERR_PTR(ret);
	}

	mem = devm_request_mem_region(&pdev->dev, fb_pgbase, fb_pgsize, drv->name);
	if (!mem) {
		drm_warn(dev, "could not acquire memory region %pr\n", res);
		return ERR_PTR(-ENOMEM);
	}
	screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!screen_base)
		return ERR_PTR(-ENOMEM);

	if (odev->funcs->cmap_ioremap) {
		void __iomem *cmap_base = odev->funcs->cmap_ioremap(odev, of_node, fb_base);

		if (IS_ERR(cmap_base)) {
			/* Don't fail; continue without colormap */
			drm_warn(dev, "could not find colormap: error %ld\n", PTR_ERR(cmap_base));
		} else {
			odev->cmap_base = cmap_base;
		}
	}

	/*
	 * Firmware framebuffer
	 */

	iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
	odev->mode = ofdrm_mode(width, height);
	odev->format = format;
	odev->pitch = linebytes;

	drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
	drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
		&format->format, width, height, linebytes);

	/*
	 * Mode-setting pipeline
	 */

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ERR_PTR(ret);

	max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
	max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);

	dev->mode_config.min_width = width;
	dev->mode_config.max_width = max_width;
	dev->mode_config.min_height = height;
	dev->mode_config.max_height = max_height;
	dev->mode_config.funcs = &ofdrm_mode_config_funcs;
	dev->mode_config.preferred_depth = format->depth;
	dev->mode_config.quirk_addfb_prefer_host_byte_order = true;

	/* Primary plane */

	nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
					    odev->formats, ARRAY_SIZE(odev->formats));

	primary_plane = &odev->primary_plane;
	ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
				       odev->formats, nformats,
				       ofdrm_primary_plane_format_modifiers,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret)
		return ERR_PTR(ret);
	drm_plane_helper_add(primary_plane, &ofdrm_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	/* CRTC */

	crtc = &odev->crtc;
	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&ofdrm_crtc_funcs, NULL);
	if (ret)
		return ERR_PTR(ret);
	drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);

	if (odev->cmap_base) {
		drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
		drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
	}

	/* Encoder */

	encoder = &odev->encoder;
	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
	if (ret)
		return ERR_PTR(ret);
	encoder->possible_crtcs = drm_crtc_mask(crtc);

	/* Connector */

	connector = &odev->connector;
	ret = drm_connector_init(dev, connector, &ofdrm_connector_funcs,
				 DRM_MODE_CONNECTOR_Unknown);
	if (ret)
		return ERR_PTR(ret);
	drm_connector_helper_add(connector, &ofdrm_connector_helper_funcs);
	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       width, height);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return odev;
}
/*
 * DRM driver
 */

DEFINE_DRM_GEM_FOPS(ofdrm_fops);

static struct drm_driver ofdrm_driver = {
	DRM_GEM_SHMEM_DRIVER_OPS,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
	.fops = &ofdrm_fops,
};

/*
 * Platform driver
 */

static int ofdrm_probe(struct platform_device *pdev)
{
	struct ofdrm_device *odev;
	struct drm_device *dev;
	unsigned int color_mode;
	int ret;

	odev = ofdrm_device_create(&ofdrm_driver, pdev);
	if (IS_ERR(odev))
		return PTR_ERR(odev);
	dev = &odev->dev;

	ret = drm_dev_register(dev, 0);
	if (ret)
		return ret;

	color_mode = drm_format_info_bpp(odev->format, 0);
	if (color_mode == 16)
		color_mode = odev->format->depth; // can be 15 or 16

	drm_fbdev_shmem_setup(dev, color_mode);

	return 0;
}

static void ofdrm_remove(struct platform_device *pdev)
{
	struct drm_device *dev = platform_get_drvdata(pdev);

	drm_dev_unplug(dev);
}

static const struct of_device_id ofdrm_of_match_display[] = {
	{ .compatible = "display", },
	{ },
};
MODULE_DEVICE_TABLE(of, ofdrm_of_match_display);

static struct platform_driver ofdrm_platform_driver = {
	.driver = {
		.name = "of-display",
		.of_match_table = ofdrm_of_match_display,
	},
	.probe = ofdrm_probe,
	.remove_new = ofdrm_remove,
};

module_platform_driver(ofdrm_platform_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");