fsl_ucc_hdlc.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183
  1. /* Freescale QUICC Engine HDLC Device Driver
  2. *
  3. * Copyright 2016 Freescale Semiconductor Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the
  7. * Free Software Foundation; either version 2 of the License, or (at your
  8. * option) any later version.
  9. */
  10. #include <linux/delay.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/hdlc.h>
  13. #include <linux/init.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/io.h>
  16. #include <linux/irq.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/netdevice.h>
  20. #include <linux/of_address.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_platform.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/sched.h>
  25. #include <linux/skbuff.h>
  26. #include <linux/slab.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/stddef.h>
  29. #include <soc/fsl/qe/qe_tdm.h>
  30. #include <uapi/linux/if_arp.h>
  31. #include "fsl_ucc_hdlc.h"
  32. #define DRV_DESC "Freescale QE UCC HDLC Driver"
  33. #define DRV_NAME "ucc_hdlc"
  34. #define TDM_PPPOHT_SLIC_MAXIN
/* Default UCC/TDM configuration template for one HDLC-capable UCC.
 * probe() copies this into utdm_info[] and then overrides per-device
 * fields (ucc_num, clock sources, ...) from the device tree.
 * Field semantics follow the QE UCC Fast programming model.
 */
static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		.tsa = 0,		/* TSA routing off unless DT enables it */
		.cdp = 0,
		.cds = 1,
		.ctsp = 1,
		.ctss = 1,
		.revd = 0,
		/* virtual FIFO sizes and thresholds, in bytes */
		.urfs = 256,
		.utfs = 256,
		.urfet = 128,
		.urfset = 192,
		.utfet = 128,
		.utftt = 0x40,
		.ufpt = 256,
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		/* frame-sync delays used with SLIC framers (selected above) */
		.simr_rfsd = 1,
		.simr_tfsd = 2,
#else
		.simr_rfsd = 0,
		.simr_tfsd = 0,
#endif
		.simr_crt = 0,
		.simr_sl = 0,
		.simr_ce = 1,
		.simr_fe = 1,
		.simr_gm = 0,
	},
};

/* Per-UCC working copies of the template, indexed by ucc_num (see probe). */
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
/* One-time bring-up of a UCC HDLC channel.
 *
 * Initializes the UCC fast controller (optionally in TSA, loopback or
 * HDLC-bus mode), allocates the RX/TX buffer-descriptor rings and the
 * DMA-coherent staging buffers, allocates and programs the MURAM
 * parameter RAM (CRC, max frame length, address filters, ...), and
 * primes every BD in both rings.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound through the goto chain at the bottom.
 */
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* Route the channel through the TSA when the DT requested it */
	if (priv->tsa) {
		uf_info->tsa = 1;
		uf_info->ctsp = 1;
	}

	/* This sets HPM register in CMXUCR register which configures a
	 * open drain connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	/* Interrupt on RX buffer/frame completion and TX buffer completion */
	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	/* keep RX/TX disabled until open() */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* Loopback mode */
	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when work in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	/* Initialize SI */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	/* NOTE(review): ret from this qe_issue_cmd() is never checked */
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (need fixed)*/
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
				UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
				ALIGNMENT_OF_UCC_HDLC_PRAM);

	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	/* Ring bookkeeping: both rings start at slot 0 */
	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
					qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* riptr/tiptr registers are only 16 bits wide */
	if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
		dev_err(priv->dev, "MURAM allocation out of addressable range\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	/* Set MRBLR */
	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);

	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	/* HDLC address recognition: mask plus four address registers */
	iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer: one contiguous area, RX buffers first then TX */
	bd_buffer = dma_zalloc_coherent(priv->dev,
					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
					MAX_RX_BUF_LENGTH,
					&bd_dma_addr, GFP_KERNEL);

	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	/* Prime the RX ring: every BD empty, last one wraps (R_W_S) */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	/* Prime the TX ring: not ready yet (no T_R_S), last one wraps */
	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}
  294. static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
  295. {
  296. hdlc_device *hdlc = dev_to_hdlc(dev);
  297. struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
  298. struct qe_bd __iomem *bd;
  299. u16 bd_status;
  300. unsigned long flags;
  301. u16 *proto_head;
  302. switch (dev->type) {
  303. case ARPHRD_RAWHDLC:
  304. if (skb_headroom(skb) < HDLC_HEAD_LEN) {
  305. dev->stats.tx_dropped++;
  306. dev_kfree_skb(skb);
  307. netdev_err(dev, "No enough space for hdlc head\n");
  308. return -ENOMEM;
  309. }
  310. skb_push(skb, HDLC_HEAD_LEN);
  311. proto_head = (u16 *)skb->data;
  312. *proto_head = htons(DEFAULT_HDLC_HEAD);
  313. dev->stats.tx_bytes += skb->len;
  314. break;
  315. case ARPHRD_PPP:
  316. proto_head = (u16 *)skb->data;
  317. if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
  318. dev->stats.tx_dropped++;
  319. dev_kfree_skb(skb);
  320. netdev_err(dev, "Wrong ppp header\n");
  321. return -ENOMEM;
  322. }
  323. dev->stats.tx_bytes += skb->len;
  324. break;
  325. default:
  326. dev->stats.tx_dropped++;
  327. dev_kfree_skb(skb);
  328. return -ENOMEM;
  329. }
  330. spin_lock_irqsave(&priv->lock, flags);
  331. /* Start from the next BD that should be filled */
  332. bd = priv->curtx_bd;
  333. bd_status = ioread16be(&bd->status);
  334. /* Save the skb pointer so we can free it later */
  335. priv->tx_skbuff[priv->skb_curtx] = skb;
  336. /* Update the current skb pointer (wrapping if this was the last) */
  337. priv->skb_curtx =
  338. (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
  339. /* copy skb data to tx buffer for sdma processing */
  340. memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
  341. skb->data, skb->len);
  342. /* set bd status and length */
  343. bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
  344. iowrite16be(skb->len, &bd->length);
  345. iowrite16be(bd_status, &bd->status);
  346. /* Move to next BD in the ring */
  347. if (!(bd_status & T_W_S))
  348. bd += 1;
  349. else
  350. bd = priv->tx_bd_base;
  351. if (bd == priv->dirty_tx) {
  352. if (!netif_queue_stopped(dev))
  353. netif_stop_queue(dev);
  354. }
  355. priv->curtx_bd = bd;
  356. spin_unlock_irqrestore(&priv->lock, flags);
  357. return NETDEV_TX_OK;
  358. }
/* Reclaim completed TX BDs: free the associated skbs, scrub the staging
 * buffer, advance dirty_tx/skb_dirtytx, and wake the queue if it was
 * stopped.  Runs from NAPI poll context under priv->lock.
 */
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. Loop while the controller has cleared T_R_S */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer. */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		/* clear the staging copy of the transmitted frame */
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}
  398. static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
  399. {
  400. struct net_device *dev = priv->ndev;
  401. struct sk_buff *skb = NULL;
  402. hdlc_device *hdlc = dev_to_hdlc(dev);
  403. struct qe_bd *bd;
  404. u16 bd_status;
  405. u16 length, howmany = 0;
  406. u8 *bdbuffer;
  407. bd = priv->currx_bd;
  408. bd_status = ioread16be(&bd->status);
  409. /* while there are received buffers and BD is full (~R_E) */
  410. while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
  411. if (bd_status & R_OV_S)
  412. dev->stats.rx_over_errors++;
  413. if (bd_status & R_CR_S) {
  414. dev->stats.rx_crc_errors++;
  415. dev->stats.rx_dropped++;
  416. goto recycle;
  417. }
  418. bdbuffer = priv->rx_buffer +
  419. (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
  420. length = ioread16be(&bd->length);
  421. switch (dev->type) {
  422. case ARPHRD_RAWHDLC:
  423. bdbuffer += HDLC_HEAD_LEN;
  424. length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
  425. skb = dev_alloc_skb(length);
  426. if (!skb) {
  427. dev->stats.rx_dropped++;
  428. return -ENOMEM;
  429. }
  430. skb_put(skb, length);
  431. skb->len = length;
  432. skb->dev = dev;
  433. memcpy(skb->data, bdbuffer, length);
  434. break;
  435. case ARPHRD_PPP:
  436. length -= HDLC_CRC_SIZE;
  437. skb = dev_alloc_skb(length);
  438. if (!skb) {
  439. dev->stats.rx_dropped++;
  440. return -ENOMEM;
  441. }
  442. skb_put(skb, length);
  443. skb->len = length;
  444. skb->dev = dev;
  445. memcpy(skb->data, bdbuffer, length);
  446. break;
  447. }
  448. dev->stats.rx_packets++;
  449. dev->stats.rx_bytes += skb->len;
  450. howmany++;
  451. if (hdlc->proto)
  452. skb->protocol = hdlc_type_trans(skb, dev);
  453. netif_receive_skb(skb);
  454. recycle:
  455. iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
  456. /* update to point at the next bd */
  457. if (bd_status & R_W_S) {
  458. priv->currx_bdnum = 0;
  459. bd = priv->rx_bd_base;
  460. } else {
  461. if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
  462. priv->currx_bdnum += 1;
  463. else
  464. priv->currx_bdnum = RX_BD_RING_LEN - 1;
  465. bd += 1;
  466. }
  467. bd_status = ioread16be(&bd->status);
  468. }
  469. priv->currx_bd = bd;
  470. return howmany;
  471. }
  472. static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
  473. {
  474. struct ucc_hdlc_private *priv = container_of(napi,
  475. struct ucc_hdlc_private,
  476. napi);
  477. int howmany;
  478. /* Tx event processing */
  479. spin_lock(&priv->lock);
  480. hdlc_tx_done(priv);
  481. spin_unlock(&priv->lock);
  482. howmany = 0;
  483. howmany += hdlc_rx_done(priv, budget - howmany);
  484. if (howmany < budget) {
  485. napi_complete_done(napi, howmany);
  486. qe_setbits32(priv->uccf->p_uccm,
  487. (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
  488. }
  489. return howmany;
  490. }
  491. static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
  492. {
  493. struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
  494. struct net_device *dev = priv->ndev;
  495. struct ucc_fast_private *uccf;
  496. struct ucc_tdm_info *ut_info;
  497. u32 ucce;
  498. u32 uccm;
  499. ut_info = priv->ut_info;
  500. uccf = priv->uccf;
  501. ucce = ioread32be(uccf->p_ucce);
  502. uccm = ioread32be(uccf->p_uccm);
  503. ucce &= uccm;
  504. iowrite32be(ucce, uccf->p_ucce);
  505. if (!ucce)
  506. return IRQ_NONE;
  507. if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
  508. if (napi_schedule_prep(&priv->napi)) {
  509. uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
  510. << 16);
  511. iowrite32be(uccm, uccf->p_uccm);
  512. __napi_schedule(&priv->napi);
  513. }
  514. }
  515. /* Errors and other events */
  516. if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
  517. dev->stats.rx_errors++;
  518. if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
  519. dev->stats.tx_errors++;
  520. return IRQ_HANDLED;
  521. }
  522. static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  523. {
  524. const size_t size = sizeof(te1_settings);
  525. te1_settings line;
  526. struct ucc_hdlc_private *priv = netdev_priv(dev);
  527. if (cmd != SIOCWANDEV)
  528. return hdlc_ioctl(dev, ifr, cmd);
  529. switch (ifr->ifr_settings.type) {
  530. case IF_GET_IFACE:
  531. ifr->ifr_settings.type = IF_IFACE_E1;
  532. if (ifr->ifr_settings.size < size) {
  533. ifr->ifr_settings.size = size; /* data size wanted */
  534. return -ENOBUFS;
  535. }
  536. memset(&line, 0, sizeof(line));
  537. line.clock_type = priv->clocking;
  538. if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
  539. return -EFAULT;
  540. return 0;
  541. default:
  542. return hdlc_ioctl(dev, ifr, cmd);
  543. }
  544. }
/* ndo_open: request the UCC interrupt, kick the QE TX/RX init command,
 * enable the controller (and TDM port when TSA is used), then bring up
 * NAPI, the queue, and the generic HDLC protocol layer.
 * Idempotent via the hdlc_busy flag.
 */
static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		/* NOTE(review): the real request_irq() error code is
		 * discarded and -ENODEV returned instead — confirm intent.
		 */
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		/* NOTE(review): hdlc_open() can fail but its return value
		 * is ignored here; no unwind is performed — verify against
		 * later upstream fixes.
		 */
		hdlc_open(dev);
	}

	return 0;
}
  571. static void uhdlc_memclean(struct ucc_hdlc_private *priv)
  572. {
  573. qe_muram_free(priv->ucc_pram->riptr);
  574. qe_muram_free(priv->ucc_pram->tiptr);
  575. if (priv->rx_bd_base) {
  576. dma_free_coherent(priv->dev,
  577. RX_BD_RING_LEN * sizeof(struct qe_bd),
  578. priv->rx_bd_base, priv->dma_rx_bd);
  579. priv->rx_bd_base = NULL;
  580. priv->dma_rx_bd = 0;
  581. }
  582. if (priv->tx_bd_base) {
  583. dma_free_coherent(priv->dev,
  584. TX_BD_RING_LEN * sizeof(struct qe_bd),
  585. priv->tx_bd_base, priv->dma_tx_bd);
  586. priv->tx_bd_base = NULL;
  587. priv->dma_tx_bd = 0;
  588. }
  589. if (priv->ucc_pram) {
  590. qe_muram_free(priv->ucc_pram_offset);
  591. priv->ucc_pram = NULL;
  592. priv->ucc_pram_offset = 0;
  593. }
  594. kfree(priv->rx_skbuff);
  595. priv->rx_skbuff = NULL;
  596. kfree(priv->tx_skbuff);
  597. priv->tx_skbuff = NULL;
  598. if (priv->uf_regs) {
  599. iounmap(priv->uf_regs);
  600. priv->uf_regs = NULL;
  601. }
  602. if (priv->uccf) {
  603. ucc_fast_free(priv->uccf);
  604. priv->uccf = NULL;
  605. }
  606. if (priv->rx_buffer) {
  607. dma_free_coherent(priv->dev,
  608. RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
  609. priv->rx_buffer, priv->dma_rx_addr);
  610. priv->rx_buffer = NULL;
  611. priv->dma_rx_addr = 0;
  612. }
  613. if (priv->tx_buffer) {
  614. dma_free_coherent(priv->dev,
  615. TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
  616. priv->tx_buffer, priv->dma_tx_addr);
  617. priv->tx_buffer = NULL;
  618. priv->dma_tx_addr = 0;
  619. }
  620. }
/* ndo_stop: quiesce NAPI, gracefully stop QE TX and close RX BDs,
 * disable the TDM port (if used) and the UCC, then release the IRQ and
 * stop the queue.  Clears hdlc_busy so uhdlc_open() can run again.
 */
static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	/* let in-flight transmission finish before stopping */
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}
  641. static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
  642. unsigned short parity)
  643. {
  644. struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
  645. if (encoding != ENCODING_NRZ &&
  646. encoding != ENCODING_NRZI)
  647. return -EINVAL;
  648. if (parity != PARITY_NONE &&
  649. parity != PARITY_CRC32_PR1_CCITT &&
  650. parity != PARITY_CRC16_PR1_CCITT)
  651. return -EINVAL;
  652. priv->encoding = encoding;
  653. priv->parity = parity;
  654. return 0;
  655. }
  656. #ifdef CONFIG_PM
/* Snapshot the QE clock-mux registers (SI clock/sync routing and the
 * four CMXUCR registers) into priv so uhdlc_resume() can restore them.
 */
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	/* store si clk */
	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	/* store si sync */
	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	/* store ucc clk */
	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}
/* Write the clock-mux snapshot taken by store_clk_config() back into
 * the QE mux registers (UCC clocks first, then SI clock/sync routing).
 */
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
/* PM suspend/freeze: detach the interface, save GUMR/GUEMR, back up
 * the HDLC parameter RAM to a kmalloc'd shadow (freed by resume), save
 * the clock mux configuration, and power the UCC down.
 * Returns 0, -EINVAL without drvdata, or -ENOMEM if the backup
 * allocation fails.
 */
static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr*/
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}
/* PM resume/thaw: reprogram the UCC registers, virtual FIFOs and clock
 * routing, restore the parameter RAM from the suspend-time backup,
 * rebuild both BD rings from scratch, and — if the device was open —
 * re-enable TX/RX and the TDM port before re-attaching the interface.
 * Returns 0, or -EINVAL if no drvdata is present.
 */
static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	/* clear any stale events before re-enabling */
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	/* rebuild SIRAM */
	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	/* NOTE(review): ret from these qe_issue_cmd() calls is unchecked */
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry: RX ring empty, last BD wraps (R_W_S) */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	/* TX ring: no BD ready, last BD wraps (T_W_S) */
	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}
/* Power-management callbacks: the same suspend/resume pair also serves
 * the hibernation freeze/thaw transitions.  When CONFIG_PM is disabled
 * (the #else branch below), HDLC_PM_OPS degrades to NULL so the driver
 * struct can reference it unconditionally.
 */
static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif
/* Netdev hooks.  Transmit goes through the generic HDLC layer
 * (hdlc_start_xmit), which dispatches to hdlc->xmit (ucc_hdlc_tx,
 * wired up in ucc_hdlc_probe).
 */
static const struct net_device_ops uhdlc_ops = {
	.ndo_open = uhdlc_open,
	.ndo_stop = uhdlc_close,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl = uhdlc_ioctl,
};
  812. static int ucc_hdlc_probe(struct platform_device *pdev)
  813. {
  814. struct device_node *np = pdev->dev.of_node;
  815. struct ucc_hdlc_private *uhdlc_priv = NULL;
  816. struct ucc_tdm_info *ut_info;
  817. struct ucc_tdm *utdm = NULL;
  818. struct resource res;
  819. struct net_device *dev;
  820. hdlc_device *hdlc;
  821. int ucc_num;
  822. const char *sprop;
  823. int ret;
  824. u32 val;
  825. ret = of_property_read_u32_index(np, "cell-index", 0, &val);
  826. if (ret) {
  827. dev_err(&pdev->dev, "Invalid ucc property\n");
  828. return -ENODEV;
  829. }
  830. ucc_num = val - 1;
  831. if ((ucc_num > 3) || (ucc_num < 0)) {
  832. dev_err(&pdev->dev, ": Invalid UCC num\n");
  833. return -EINVAL;
  834. }
  835. memcpy(&utdm_info[ucc_num], &utdm_primary_info,
  836. sizeof(utdm_primary_info));
  837. ut_info = &utdm_info[ucc_num];
  838. ut_info->uf_info.ucc_num = ucc_num;
  839. sprop = of_get_property(np, "rx-clock-name", NULL);
  840. if (sprop) {
  841. ut_info->uf_info.rx_clock = qe_clock_source(sprop);
  842. if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
  843. (ut_info->uf_info.rx_clock > QE_CLK24)) {
  844. dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
  845. return -EINVAL;
  846. }
  847. } else {
  848. dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
  849. return -EINVAL;
  850. }
  851. sprop = of_get_property(np, "tx-clock-name", NULL);
  852. if (sprop) {
  853. ut_info->uf_info.tx_clock = qe_clock_source(sprop);
  854. if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
  855. (ut_info->uf_info.tx_clock > QE_CLK24)) {
  856. dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
  857. return -EINVAL;
  858. }
  859. } else {
  860. dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
  861. return -EINVAL;
  862. }
  863. ret = of_address_to_resource(np, 0, &res);
  864. if (ret)
  865. return -EINVAL;
  866. ut_info->uf_info.regs = res.start;
  867. ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
  868. uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
  869. if (!uhdlc_priv) {
  870. return -ENOMEM;
  871. }
  872. dev_set_drvdata(&pdev->dev, uhdlc_priv);
  873. uhdlc_priv->dev = &pdev->dev;
  874. uhdlc_priv->ut_info = ut_info;
  875. if (of_get_property(np, "fsl,tdm-interface", NULL))
  876. uhdlc_priv->tsa = 1;
  877. if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
  878. uhdlc_priv->loopback = 1;
  879. if (of_get_property(np, "fsl,hdlc-bus", NULL))
  880. uhdlc_priv->hdlc_bus = 1;
  881. if (uhdlc_priv->tsa == 1) {
  882. utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
  883. if (!utdm) {
  884. ret = -ENOMEM;
  885. dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
  886. goto free_uhdlc_priv;
  887. }
  888. uhdlc_priv->utdm = utdm;
  889. ret = ucc_of_parse_tdm(np, utdm, ut_info);
  890. if (ret)
  891. goto free_utdm;
  892. }
  893. ret = uhdlc_init(uhdlc_priv);
  894. if (ret) {
  895. dev_err(&pdev->dev, "Failed to init uhdlc\n");
  896. goto free_utdm;
  897. }
  898. dev = alloc_hdlcdev(uhdlc_priv);
  899. if (!dev) {
  900. ret = -ENOMEM;
  901. pr_err("ucc_hdlc: unable to allocate memory\n");
  902. goto undo_uhdlc_init;
  903. }
  904. uhdlc_priv->ndev = dev;
  905. hdlc = dev_to_hdlc(dev);
  906. dev->tx_queue_len = 16;
  907. dev->netdev_ops = &uhdlc_ops;
  908. hdlc->attach = ucc_hdlc_attach;
  909. hdlc->xmit = ucc_hdlc_tx;
  910. netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
  911. if (register_hdlc_device(dev)) {
  912. ret = -ENOBUFS;
  913. pr_err("ucc_hdlc: unable to register hdlc device\n");
  914. goto free_dev;
  915. }
  916. return 0;
  917. free_dev:
  918. free_netdev(dev);
  919. undo_uhdlc_init:
  920. free_utdm:
  921. if (uhdlc_priv->tsa)
  922. kfree(utdm);
  923. free_uhdlc_priv:
  924. kfree(uhdlc_priv);
  925. return ret;
  926. }
  927. static int ucc_hdlc_remove(struct platform_device *pdev)
  928. {
  929. struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
  930. uhdlc_memclean(priv);
  931. if (priv->utdm->si_regs) {
  932. iounmap(priv->utdm->si_regs);
  933. priv->utdm->si_regs = NULL;
  934. }
  935. if (priv->utdm->siram) {
  936. iounmap(priv->utdm->siram);
  937. priv->utdm->siram = NULL;
  938. }
  939. kfree(priv);
  940. dev_info(&pdev->dev, "UCC based hdlc module removed\n");
  941. return 0;
  942. }
/* Device-tree compatible strings this driver binds against; exported
 * via MODULE_DEVICE_TABLE so udev/modprobe can autoload the module.
 */
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
/* Platform-driver glue.  HDLC_PM_OPS resolves to &uhdlc_pm_ops when
 * CONFIG_PM is enabled and NULL otherwise (see the #ifdef block above).
 */
static struct platform_driver ucc_hdlc_driver = {
	.probe = ucc_hdlc_probe,
	.remove = ucc_hdlc_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = HDLC_PM_OPS,
		.of_match_table = fsl_ucc_hdlc_of_match,
	},
};

/* Registers the driver and generates module init/exit boilerplate. */
module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");