flexcop-pci.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III
  4. * flexcop-pci.c - covers the PCI part including DMA transfers
  5. * see flexcop.c for copyright information
  6. */
#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

/* 0 = pass the full TS to the demuxer, 1 = use the hardware PID filter */
static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

/* watchdog interval in ms; values below 100 are clamped to 100 when used */
static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv, "set the interval for IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level, args...) \
	do { if ((debug & (level))) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level, args...) no_printk(args)
#define DEBSTATUS " (debugging is not enabled)"
#endif

/* per-category debug helpers; one bit of 'debug' each (see MODULE_PARM_DESC) */
#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
/* bus-specific state of one FlexCop PCI card */
struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT 0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;		/* bitmask of FC_PCI_* stages reached */

	void __iomem *io_mem;	/* BAR0 mapping (0x800 bytes, see pci_iomap) */
	u32 irq;
	/* buffersize (at least for DMA1, need to be % 188 == 0,
	 * this logic is required */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;		/* valid IRQs seen so far */
	int count_prev;		/* 'count' at the previous watchdog check */
	int stream_problem;	/* consecutive watchdog checks without an IRQ */

	spinlock_t irq_lock;	/* serializes the ISR */
	unsigned long last_irq;	/* jiffies of the last timer IRQ (debug only) */

	struct delayed_work irq_check_work;	/* streaming watchdog */
	struct flexcop_device *fc_dev;
};
/* last written/read register+value pairs; used by the ibi accessors below
 * to suppress repeated identical lines in the register debug output */
static int lastwreg, lastwval, lastrreg, lastrval;
  61. static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
  62. flexcop_ibi_register r)
  63. {
  64. struct flexcop_pci *fc_pci = fc->bus_specific;
  65. flexcop_ibi_value v;
  66. v.raw = readl(fc_pci->io_mem + r);
  67. if (lastrreg != r || lastrval != v.raw) {
  68. lastrreg = r; lastrval = v.raw;
  69. deb_reg("new rd: %3x: %08x\n", r, v.raw);
  70. }
  71. return v;
  72. }
  73. static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
  74. flexcop_ibi_register r, flexcop_ibi_value v)
  75. {
  76. struct flexcop_pci *fc_pci = fc->bus_specific;
  77. if (lastwreg != r || lastwval != v.raw) {
  78. lastwreg = r; lastwval = v.raw;
  79. deb_reg("new wr: %3x: %08x\n", r, v.raw);
  80. }
  81. writel(v.raw, fc_pci->io_mem + r);
  82. return 0;
  83. }
  84. static void flexcop_pci_irq_check_work(struct work_struct *work)
  85. {
  86. struct flexcop_pci *fc_pci =
  87. container_of(work, struct flexcop_pci, irq_check_work.work);
  88. struct flexcop_device *fc = fc_pci->fc_dev;
  89. if (fc->feedcount) {
  90. if (fc_pci->count == fc_pci->count_prev) {
  91. deb_chk("no IRQ since the last check\n");
  92. if (fc_pci->stream_problem++ == 3) {
  93. struct dvb_demux_feed *feed;
  94. deb_info("flexcop-pci: stream problem, resetting pid filter\n");
  95. spin_lock_irq(&fc->demux.lock);
  96. list_for_each_entry(feed, &fc->demux.feed_list,
  97. list_head) {
  98. flexcop_pid_feed_control(fc, feed, 0);
  99. }
  100. list_for_each_entry(feed, &fc->demux.feed_list,
  101. list_head) {
  102. flexcop_pid_feed_control(fc, feed, 1);
  103. }
  104. spin_unlock_irq(&fc->demux.lock);
  105. fc_pci->stream_problem = 0;
  106. }
  107. } else {
  108. fc_pci->stream_problem = 0;
  109. fc_pci->count_prev = fc_pci->count;
  110. }
  111. }
  112. schedule_delayed_work(&fc_pci->irq_check_work,
  113. msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
  114. }
  115. /* When PID filtering is turned on, we use the timer IRQ, because small amounts
  116. * of data need to be passed to the user space instantly as well. When PID
  117. * filtering is turned off, we use the page-change-IRQ */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors - reported only; the IRQ still counts as handled */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irq took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		/* page-change IRQ: the half of the DMA1 double buffer that
		 * just filled is complete; pass it as whole 188-byte TS
		 * packets, then flip to the other half */
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
	/* for the timer IRQ we only can use buffer dmx feeding, because we don't
	 * have complete TS packets when reading from the DMA memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* the hardware reports the current address in 32-bit words,
		 * hence the << 2 to get a byte address */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
		/* byte offset of the DMA cursor within the double buffer */
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		/* cursor outside the double buffer: bogus readback, drop
		 * this IRQ (still IRQ_HANDLED - the unlock path is shared) */
		if (cur_pos > fc_pci->dma[0].size * 2)
			goto error;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* buffer end was reached, restarted from the beginning
		 * pass the data from last_cur_pos to the buffer end to the demux
		 */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
					(fc_pci->dma[0].size * 2 - 1) -
					fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
					(fc_pci->dma[0].size * 2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		/* pass the bytes that arrived since the last known position */
		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
					cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
					cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		/* shared IRQ line: not ours this time */
		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
				v.raw);
		ret = IRQ_NONE;
	}

error:
	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}
  196. static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
  197. {
  198. struct flexcop_pci *fc_pci = fc->bus_specific;
  199. if (onoff) {
  200. flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
  201. flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
  202. flexcop_dma_config_timer(fc, FC_DMA_1, 0);
  203. flexcop_dma_xfer_control(fc, FC_DMA_1,
  204. FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
  205. deb_irq("DMA xfer enabled\n");
  206. fc_pci->last_dma1_cur_pos = 0;
  207. flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
  208. deb_irq("IRQ enabled\n");
  209. fc_pci->count_prev = fc_pci->count;
  210. } else {
  211. flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
  212. deb_irq("IRQ disabled\n");
  213. flexcop_dma_xfer_control(fc, FC_DMA_1,
  214. FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
  215. deb_irq("DMA xfer disabled\n");
  216. }
  217. return 0;
  218. }
  219. static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
  220. {
  221. int ret;
  222. ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
  223. FC_DEFAULT_DMA1_BUFSIZE);
  224. if (ret != 0)
  225. return ret;
  226. ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
  227. FC_DEFAULT_DMA2_BUFSIZE);
  228. if (ret != 0) {
  229. flexcop_dma_free(&fc_pci->dma[0]);
  230. return ret;
  231. }
  232. flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
  233. FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
  234. flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
  235. FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
  236. fc_pci->init_state |= FC_PCI_DMA_INIT;
  237. return ret;
  238. }
  239. static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
  240. {
  241. if (fc_pci->init_state & FC_PCI_DMA_INIT) {
  242. flexcop_dma_free(&fc_pci->dma[0]);
  243. flexcop_dma_free(&fc_pci->dma[1]);
  244. }
  245. fc_pci->init_state &= ~FC_PCI_DMA_INIT;
  246. }
  247. static int flexcop_pci_init(struct flexcop_pci *fc_pci)
  248. {
  249. int ret;
  250. info("card revision %x", fc_pci->pdev->revision);
  251. if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
  252. return ret;
  253. pci_set_master(fc_pci->pdev);
  254. if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
  255. goto err_pci_disable_device;
  256. fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);
  257. if (!fc_pci->io_mem) {
  258. err("cannot map io memory\n");
  259. ret = -EIO;
  260. goto err_pci_release_regions;
  261. }
  262. pci_set_drvdata(fc_pci->pdev, fc_pci);
  263. spin_lock_init(&fc_pci->irq_lock);
  264. if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
  265. IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
  266. goto err_pci_iounmap;
  267. fc_pci->init_state |= FC_PCI_INIT;
  268. return ret;
  269. err_pci_iounmap:
  270. pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
  271. err_pci_release_regions:
  272. pci_release_regions(fc_pci->pdev);
  273. err_pci_disable_device:
  274. pci_disable_device(fc_pci->pdev);
  275. return ret;
  276. }
  277. static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
  278. {
  279. if (fc_pci->init_state & FC_PCI_INIT) {
  280. free_irq(fc_pci->pdev->irq, fc_pci);
  281. pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
  282. pci_release_regions(fc_pci->pdev);
  283. pci_disable_device(fc_pci->pdev);
  284. }
  285. fc_pci->init_state &= ~FC_PCI_INIT;
  286. }
  287. static int flexcop_pci_probe(struct pci_dev *pdev,
  288. const struct pci_device_id *ent)
  289. {
  290. struct flexcop_device *fc;
  291. struct flexcop_pci *fc_pci;
  292. int ret = -ENOMEM;
  293. if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
  294. err("out of memory\n");
  295. return -ENOMEM;
  296. }
  297. /* general flexcop init */
  298. fc_pci = fc->bus_specific;
  299. fc_pci->fc_dev = fc;
  300. fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
  301. fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
  302. fc->i2c_request = flexcop_i2c_request;
  303. fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
  304. fc->stream_control = flexcop_pci_stream_control;
  305. if (enable_pid_filtering)
  306. info("will use the HW PID filter.");
  307. else
  308. info("will pass the complete TS to the demuxer.");
  309. fc->pid_filtering = enable_pid_filtering;
  310. fc->bus_type = FC_PCI;
  311. fc->dev = &pdev->dev;
  312. fc->owner = THIS_MODULE;
  313. /* bus specific part */
  314. fc_pci->pdev = pdev;
  315. if ((ret = flexcop_pci_init(fc_pci)) != 0)
  316. goto err_kfree;
  317. /* init flexcop */
  318. if ((ret = flexcop_device_initialize(fc)) != 0)
  319. goto err_pci_exit;
  320. /* init dma */
  321. if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
  322. goto err_fc_exit;
  323. INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
  324. if (irq_chk_intv > 0)
  325. schedule_delayed_work(&fc_pci->irq_check_work,
  326. msecs_to_jiffies(irq_chk_intv < 100 ?
  327. 100 :
  328. irq_chk_intv));
  329. return ret;
  330. err_fc_exit:
  331. flexcop_device_exit(fc);
  332. err_pci_exit:
  333. flexcop_pci_exit(fc_pci);
  334. err_kfree:
  335. flexcop_device_kfree(fc);
  336. return ret;
  337. }
  338. /* in theory every _exit function should be called exactly two times,
  339. * here and in the bail-out-part of the _init-function
  340. */
  341. static void flexcop_pci_remove(struct pci_dev *pdev)
  342. {
  343. struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
  344. if (irq_chk_intv > 0)
  345. cancel_delayed_work(&fc_pci->irq_check_work);
  346. flexcop_pci_dma_exit(fc_pci);
  347. flexcop_device_exit(fc_pci->fc_dev);
  348. flexcop_pci_exit(fc_pci);
  349. flexcop_device_kfree(fc_pci->fc_dev);
  350. }
/* PCI IDs handled by this driver: B2C2 FlexCopII/IIb/III (0x13d0:0x2103) */
static const struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },
};
MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe = flexcop_pci_probe,
	.remove = flexcop_pci_remove,
};
module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");