/*
 * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 */

/*
 * Core code for the Via multifunction framebuffer device.
 */
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
#include "global.h"

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/pm.h>

/*
 * The default port config.
 */
static struct via_port_cfg adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_I2C,  VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }
};

/*
 * The OLPC XO-1.5 puts the camera power and reset lines onto
 * GPIO 2C.
 */
static struct via_port_cfg olpc_adap_configs[] = {
	[VIA_PORT_26]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x26 },
	[VIA_PORT_31]	= { VIA_PORT_I2C,  VIA_MODE_I2C,  VIASR, 0x31 },
	[VIA_PORT_25]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
	[VIA_PORT_2C]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x2c },
	[VIA_PORT_3D]	= { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
	{ 0, 0, 0, 0 }
};

/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.
 */
static struct viafb_dev global_dev;


/*
 * Basic register access; spinlock required.
 */
static inline void viafb_mmio_write(int reg, u32 v)
{
	iowrite32(v, global_dev.engine_mmio + reg);
}

static inline int viafb_mmio_read(int reg)
{
	return ioread32(global_dev.engine_mmio + reg);
}
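
/*
 * Illustrative sketch (not part of the original driver): the helpers
 * above do not take the lock themselves, so a hypothetical caller
 * poking an engine register would be expected to hold reg_lock, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&global_dev.reg_lock, flags);
 *	viafb_mmio_write(VDE_INTERRUPT, 0);
 *	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
 */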

/* ---------------------------------------------------------------------- */
/*
 * Interrupt management. We have a single IRQ line for a lot of
 * different functions, so we need to share it. The design here
 * is that we don't want to reimplement the shared IRQ code here;
 * we also want to avoid having contention for a single handler thread.
 * So each subdev driver which needs interrupts just requests
 * them directly from the kernel. We just have what's needed for
 * overall access to the interrupt control register.
 */

/*
 * Which interrupts are enabled now?
 */
static u32 viafb_enabled_ints;

static void viafb_int_init(void)
{
	viafb_enabled_ints = 0;
	viafb_mmio_write(VDE_INTERRUPT, 0);
}

/*
 * Allow subdevs to ask for specific interrupts to be enabled. These
 * functions must be called with reg_lock held
 */
void viafb_irq_enable(u32 mask)
{
	viafb_enabled_ints |= mask;
	viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);

void viafb_irq_disable(u32 mask)
{
	viafb_enabled_ints &= ~mask;
	if (viafb_enabled_ints == 0)
		viafb_mmio_write(VDE_INTERRUPT, 0);	/* Disable entirely */
	else
		viafb_mmio_write(VDE_INTERRUPT,
				viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_disable);
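
/*
 * Illustrative sketch (not from the original driver): a hypothetical
 * subdev wanting its own interrupt would request the shared IRQ line
 * itself and then turn on its bits under reg_lock, roughly:
 *
 *	ret = request_irq(viafb_dev->pdev->irq, my_irq_handler,
 *			  IRQF_SHARED, "my-subdev", my_data);
 *	if (!ret) {
 *		spin_lock_irqsave(&viafb_dev->reg_lock, flags);
 *		viafb_irq_enable(MY_INTERRUPT_BITS);
 *		spin_unlock_irqrestore(&viafb_dev->reg_lock, flags);
 *	}
 *
 * my_irq_handler, my_data and MY_INTERRUPT_BITS are made-up names.
 */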

/* ---------------------------------------------------------------------- */
/*
 * Currently, the camera driver is the only user of the DMA code, so we
 * only compile it in if the camera driver is being built. Chances are,
 * most viafb systems will not need to have this extra code for a while.
 * As soon as another user comes along, the ifdef can be removed.
 */
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)

/*
 * Access to the DMA engine. This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855. For now, we only
 * use one of them, though. Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);

/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
};

/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01  /* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02  /* Final segment */
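
/*
 * Note for readers: the descriptor-chain code below masks descriptor
 * addresses with 0xfffffff0, so the low bits of next_desc_low are free
 * to carry these flags; see viafb_dma_copy_out_sg().
 */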

/*
 * The completion IRQ handler.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
	int csr;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&global_dev.reg_lock);
	csr = viafb_mmio_read(VDMA_CSR0);
	if (csr & VDMA_C_DONE) {
		viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
		complete(&viafb_dma_completion);
		ret = IRQ_HANDLED;
	}
	spin_unlock(&global_dev.reg_lock);
	return ret;
}

/*
 * Indicate a need for DMA functionality.
 */
int viafb_request_dma(void)
{
	int ret = 0;

	/*
	 * Only VX855 is supported currently.
	 */
	if (global_dev.chip_type != UNICHROME_VX855)
		return -ENODEV;
	/*
	 * Note the new user and set up our interrupt handler
	 * if need be.
	 */
	mutex_lock(&viafb_dma_lock);
	viafb_dma_users++;
	if (viafb_dma_users == 1) {
		ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
				IRQF_SHARED, "via-dma", &viafb_dma_users);
		if (ret)
			viafb_dma_users--;
		else
			viafb_irq_enable(VDE_I_DMA0TDEN);
	}
	mutex_unlock(&viafb_dma_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(viafb_request_dma);

void viafb_release_dma(void)
{
	mutex_lock(&viafb_dma_lock);
	viafb_dma_users--;
	if (viafb_dma_users == 0) {
		viafb_irq_disable(VDE_I_DMA0TDEN);
		free_irq(global_dev.pdev->irq, &viafb_dma_users);
	}
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_release_dma);

#if 0
/*
 * Copy a single buffer from FB memory, synchronously. This code works
 * but is not currently used.
 */
void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len)
{
	unsigned long flags;
	int csr;

	mutex_lock(&viafb_dma_lock);
	init_completion(&viafb_dma_completion);
	/*
	 * Program the controller.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	/* Enable ints; must happen after CSR0 write! */
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE);
	viafb_mmio_write(VDMA_MARL0, (int) (paddr & 0xfffffff0));
	viafb_mmio_write(VDMA_MARH0, (int) ((paddr >> 28) & 0xfff));
	/* Data sheet suggests DAR0 should be <<4, but it lies */
	viafb_mmio_write(VDMA_DAR0, offset);
	viafb_mmio_write(VDMA_DQWCR0, len >> 4);
	viafb_mmio_write(VDMA_TMR0, 0);
	viafb_mmio_write(VDMA_DPRL0, 0);
	viafb_mmio_write(VDMA_DPRH0, 0);
	viafb_mmio_write(VDMA_PMR0, 0);
	csr = viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done.
	 */
	wait_for_completion_interruptible(&viafb_dma_completion);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out);
#endif

/*
 * Do a scatter/gather DMA copy from FB memory. You must have done
 * a successful call to viafb_request_dma() first.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
	struct viafb_vx855_dma_descr *descr;
	void *descrpages;
	dma_addr_t descr_handle;
	unsigned long flags;
	int i;
	struct scatterlist *sgentry;
	dma_addr_t nextdesc;

	/*
	 * Get a place to put the descriptors.
	 */
	descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr),
			&descr_handle, GFP_KERNEL);
	if (descrpages == NULL) {
		dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
		return -ENOMEM;
	}
	mutex_lock(&viafb_dma_lock);
	/*
	 * Fill them in.
	 */
	descr = descrpages;
	nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
	for_each_sg(sg, sgentry, nsg, i) {
		dma_addr_t paddr = sg_dma_address(sgentry);
		descr->addr_low = paddr & 0xfffffff0;
		descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
		descr->fb_offset = offset;
		descr->seg_size = sg_dma_len(sgentry) >> 4;
		descr->tile_mode = 0;
		descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
		descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
		descr->pad = 0xffffffff;  /* VIA driver does this */
		offset += sg_dma_len(sgentry);
		nextdesc += sizeof(struct viafb_vx855_dma_descr);
		descr++;
	}
	descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
	/*
	 * Program the engine.
	 */
	spin_lock_irqsave(&global_dev.reg_lock, flags);
	init_completion(&viafb_dma_completion);
	viafb_mmio_write(VDMA_DQWCR0, 0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
	viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
	viafb_mmio_write(VDMA_DPRH0,
			(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
	(void) viafb_mmio_read(VDMA_CSR0);
	viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
	spin_unlock_irqrestore(&global_dev.reg_lock, flags);
	/*
	 * Now we just wait until the interrupt handler says
	 * we're done. Except that, actually, we need to wait a little
	 * longer: the interrupts seem to jump the gun a little and we
	 * get corrupted frames sometimes.
	 */
	wait_for_completion_timeout(&viafb_dma_completion, 1);
	msleep(1);
	if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
		printk(KERN_ERR "VIA DMA timeout!\n");
	/*
	 * Clean up and we're done.
	 */
	viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
	viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
	mutex_unlock(&viafb_dma_lock);
	dma_free_coherent(&global_dev.pdev->dev,
			nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
			descr_handle);
	return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
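
/*
 * Illustrative sketch (not from the original driver): a hypothetical
 * user such as a capture subdev would bracket its transfers roughly
 * like this; fb_offset, my_dev, my_sg and my_nents are made-up names:
 *
 *	if (viafb_request_dma() == 0) {
 *		int nents = dma_map_sg(&my_dev->dev, my_sg, my_nents,
 *				       DMA_FROM_DEVICE);
 *		if (nents > 0) {
 *			viafb_dma_copy_out_sg(fb_offset, my_sg, nents);
 *			dma_unmap_sg(&my_dev->dev, my_sg, my_nents,
 *				     DMA_FROM_DEVICE);
 *		}
 *		viafb_release_dma();
 *	}
 */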
#endif /* CONFIG_VIDEO_VIA_CAMERA */

/* ---------------------------------------------------------------------- */
/*
 * Figure out how big our framebuffer memory is. Kind of ugly,
 * but evidently we can't trust the information found in the
 * fbdev configuration area.
 */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};

/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset */
static int viafb_get_fb_size_from_pci(int chip_type)
{
	int i;
	u8 offset = 0;
	u32 FBSize;
	u32 VideoMemSize;

	/* search for the "FUNCTION3" device in this chipset */
	for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
		struct pci_dev *pdev;

		pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
				      NULL);
		if (!pdev)
			continue;

		DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);

		switch (pdev->device) {
		case CLE266_FUNCTION3:
		case KM400_FUNCTION3:
			offset = 0xE0;
			break;
		case CN400_FUNCTION3:
		case CN700_FUNCTION3:
		case CX700_FUNCTION3:
		case KM800_FUNCTION3:
		case KM890_FUNCTION3:
		case P4M890_FUNCTION3:
		case P4M900_FUNCTION3:
		case VX800_FUNCTION3:
		case VX855_FUNCTION3:
		case VX900_FUNCTION3:
		/*case CN750_FUNCTION3: */
			offset = 0xA0;
			break;
		}

		if (!offset)
			break;

		pci_read_config_dword(pdev, offset, &FBSize);
		pci_dev_put(pdev);
	}

	if (!offset) {
		printk(KERN_ERR "cannot determine framebuffer size\n");
		return -EIO;
	}

	FBSize = FBSize & 0x00007000;
	DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);

	if (chip_type < UNICHROME_CX700) {
		switch (FBSize) {
		case 0x00004000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00005000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00006000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	} else {
		switch (FBSize) {
		case 0x00001000:
			VideoMemSize = (8 << 20);	/*8M */
			break;
		case 0x00002000:
			VideoMemSize = (16 << 20);	/*16M */
			break;
		case 0x00003000:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		case 0x00004000:
			VideoMemSize = (64 << 20);	/*64M */
			break;
		case 0x00005000:
			VideoMemSize = (128 << 20);	/*128M */
			break;
		case 0x00006000:
			VideoMemSize = (256 << 20);	/*256M */
			break;
		case 0x00007000:	/* Only on VX855/875 */
			VideoMemSize = (512 << 20);	/*512M */
			break;
		default:
			VideoMemSize = (32 << 20);	/*32M */
			break;
		}
	}

	return VideoMemSize;
}
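
/*
 * Note for readers: only bits 14:12 of the config dword are used (the
 * 0x00007000 mask above), and the same code decodes to different sizes
 * on different generations; 0x00005000, for example, means 32M before
 * the CX700 and 128M from the CX700 onward.
 */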

/*
 * Figure out and map our MMIO regions.
 */
static int via_pci_setup_mmio(struct viafb_dev *vdev)
{
	int ret;

	/*
	 * Hook up to the device registers. Note that we soldier
	 * on if it fails; the framebuffer can operate (without
	 * acceleration) without this region.
	 */
	vdev->engine_start = pci_resource_start(vdev->pdev, 1);
	vdev->engine_len = pci_resource_len(vdev->pdev, 1);
	vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
			vdev->engine_len);
	if (vdev->engine_mmio == NULL)
		dev_err(&vdev->pdev->dev,
				"Unable to map engine MMIO; operation will be "
				"slow and crippled.\n");
	/*
	 * Map in framebuffer memory. For now, failure here is
	 * fatal. Unfortunately, in the absence of significant
	 * vmalloc space, failure here is also entirely plausible.
	 * Eventually we want to move away from mapping this
	 * entire region.
	 */
	if (vdev->chip_type == UNICHROME_VX900)
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
	else
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
	ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
	if (ret < 0)
		goto out_unmap;

	/* try to map less memory on failure, 8 MB should be still enough */
	for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) {
		vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len);
		if (vdev->fbmem)
			break;
	}
	if (vdev->fbmem == NULL) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	return 0;
out_unmap:
	iounmap(vdev->engine_mmio);
	return ret;
}

static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
	iounmap(vdev->fbmem);
	iounmap(vdev->engine_mmio);
}

/*
 * Create our subsidiary devices.
 */
static struct viafb_subdev_info {
	char *name;
	struct platform_device *platdev;
} viafb_subdevs[] = {
	{
		.name = "viafb-gpio",
	},
	{
		.name = "viafb-i2c",
	},
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
	{
		.name = "viafb-camera",
	},
#endif
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)

static int via_create_subdev(struct viafb_dev *vdev,
			     struct viafb_subdev_info *info)
{
	int ret;

	info->platdev = platform_device_alloc(info->name, -1);
	if (!info->platdev) {
		dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
			info->name);
		return -ENOMEM;
	}
	info->platdev->dev.parent = &vdev->pdev->dev;
	info->platdev->dev.platform_data = vdev;
	ret = platform_device_add(info->platdev);
	if (ret) {
		dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
			info->name);
		platform_device_put(info->platdev);
		info->platdev = NULL;
	}
	return ret;
}

static int via_setup_subdevs(struct viafb_dev *vdev)
{
	int i;

	/*
	 * Ignore return values. Even if some of the devices
	 * fail to be created, we'll still be able to use some
	 * of the rest.
	 */
	for (i = 0; i < N_SUBDEVS; i++)
		via_create_subdev(vdev, viafb_subdevs + i);
	return 0;
}

static void via_teardown_subdevs(void)
{
	int i;

	for (i = 0; i < N_SUBDEVS; i++)
		if (viafb_subdevs[i].platdev) {
			viafb_subdevs[i].platdev->dev.platform_data = NULL;
			platform_device_unregister(viafb_subdevs[i].platdev);
		}
}
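
/*
 * Illustrative sketch (not from the original driver): each subdev is a
 * platform driver whose probe() can recover the shared viafb_dev from
 * the platform data set above, roughly:
 *
 *	static int my_subdev_probe(struct platform_device *platdev)
 *	{
 *		struct viafb_dev *vdev = platdev->dev.platform_data;
 *		...
 *	}
 *
 * my_subdev_probe is a made-up name; the real subdev drivers live in
 * their own source files.
 */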

/*
 * Power management functions
 */
#ifdef CONFIG_PM
static LIST_HEAD(viafb_pm_hooks);
static DEFINE_MUTEX(viafb_pm_hooks_lock);

void viafb_pm_register(struct viafb_pm_hooks *hooks)
{
	INIT_LIST_HEAD(&hooks->list);

	mutex_lock(&viafb_pm_hooks_lock);
	list_add_tail(&hooks->list, &viafb_pm_hooks);
	mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_register);

void viafb_pm_unregister(struct viafb_pm_hooks *hooks)
{
	mutex_lock(&viafb_pm_hooks_lock);
	list_del(&hooks->list);
	mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_unregister);
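
/*
 * Illustrative sketch (not from the original driver): a hypothetical
 * subdev wanting suspend/resume callbacks would fill in a hooks
 * structure and register it; my_suspend, my_resume and my_data are
 * made-up names:
 *
 *	static struct viafb_pm_hooks my_pm_hooks = {
 *		.suspend = my_suspend,
 *		.resume  = my_resume,
 *	};
 *
 *	my_pm_hooks.private = my_data;
 *	viafb_pm_register(&my_pm_hooks);
 */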

static int via_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct viafb_pm_hooks *hooks;

	if (state.event != PM_EVENT_SUSPEND)
		return 0;
	/*
	 * "I've occasionally hit a few drivers that caused suspend
	 * failures, and each and every time it was a driver bug, and
	 * the right thing to do was to just ignore the error and suspend
	 * anyway - returning an error code and trying to undo the suspend
	 * is not what anybody ever really wants, even if our model
	 * _allows_ for it."
	 * -- Linus Torvalds, Dec. 7, 2009
	 */
	mutex_lock(&viafb_pm_hooks_lock);
	list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list)
		hooks->suspend(hooks->private);
	mutex_unlock(&viafb_pm_hooks_lock);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int via_resume(struct pci_dev *pdev)
{
	struct viafb_pm_hooks *hooks;

	/* Get the bus side powered up */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pci_enable_device(pdev))
		return 0;

	pci_set_master(pdev);

	/* Now bring back any subdevs */
	mutex_lock(&viafb_pm_hooks_lock);
	list_for_each_entry(hooks, &viafb_pm_hooks, list)
		hooks->resume(hooks->private);
	mutex_unlock(&viafb_pm_hooks_lock);

	return 0;
}
#endif /* CONFIG_PM */

static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * Global device initialization.
	 */
	memset(&global_dev, 0, sizeof(global_dev));
	global_dev.pdev = pdev;
	global_dev.chip_type = ent->driver_data;
	global_dev.port_cfg = adap_configs;
	if (machine_is_olpc())
		global_dev.port_cfg = olpc_adap_configs;

	spin_lock_init(&global_dev.reg_lock);
	ret = via_pci_setup_mmio(&global_dev);
	if (ret)
		goto out_disable;
	/*
	 * Set up interrupts and create our subdevices. Continue even if
	 * some things fail.
	 */
	viafb_int_init();
	via_setup_subdevs(&global_dev);
	/*
	 * Set up the framebuffer device
	 */
	ret = via_fb_pci_probe(&global_dev);
	if (ret)
		goto out_subdevs;
	return 0;

out_subdevs:
	via_teardown_subdevs();
	via_pci_teardown_mmio(&global_dev);
out_disable:
	pci_disable_device(pdev);
	return ret;
}

static void via_pci_remove(struct pci_dev *pdev)
{
	via_teardown_subdevs();
	via_fb_pci_remove(pdev);
	via_pci_teardown_mmio(&global_dev);
	pci_disable_device(pdev);
}


static const struct pci_device_id via_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
	  .driver_data = UNICHROME_CLE266 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
	  .driver_data = UNICHROME_K400 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
	  .driver_data = UNICHROME_K800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
	  .driver_data = UNICHROME_PM800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
	  .driver_data = UNICHROME_CN700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
	  .driver_data = UNICHROME_CX700 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
	  .driver_data = UNICHROME_CN750 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
	  .driver_data = UNICHROME_K8M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
	  .driver_data = UNICHROME_P4M890 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
	  .driver_data = UNICHROME_P4M900 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
	  .driver_data = UNICHROME_VX800 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
	  .driver_data = UNICHROME_VX855 },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
	  .driver_data = UNICHROME_VX900 },
	{ }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);

static struct pci_driver via_driver = {
	.name		= "viafb",
	.id_table	= via_pci_table,
	.probe		= via_pci_probe,
	.remove		= via_pci_remove,
#ifdef CONFIG_PM
	.suspend	= via_suspend,
	.resume		= via_resume,
#endif
};

static int __init via_core_init(void)
{
	int ret;

	ret = viafb_init();
	if (ret)
		return ret;
	viafb_i2c_init();
	viafb_gpio_init();
	return pci_register_driver(&via_driver);
}

static void __exit via_core_exit(void)
{
	pci_unregister_driver(&via_driver);
	viafb_gpio_exit();
	viafb_i2c_exit();
	viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);