serial-tegra.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * serial_tegra.c
  4. *
  5. * High-speed serial driver for NVIDIA Tegra SoCs
  6. *
  7. * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
  8. *
  9. * Author: Laxman Dewangan <ldewangan@nvidia.com>
  10. */
  11. #include <linux/clk.h>
  12. #include <linux/debugfs.h>
  13. #include <linux/delay.h>
  14. #include <linux/dmaengine.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/dmapool.h>
  17. #include <linux/err.h>
  18. #include <linux/io.h>
  19. #include <linux/irq.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/of_device.h>
  23. #include <linux/pagemap.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/reset.h>
  26. #include <linux/serial.h>
  27. #include <linux/serial_8250.h>
  28. #include <linux/serial_core.h>
  29. #include <linux/serial_reg.h>
  30. #include <linux/slab.h>
  31. #include <linux/string.h>
  32. #include <linux/termios.h>
  33. #include <linux/tty.h>
  34. #include <linux/tty_flip.h>
  35. #define TEGRA_UART_TYPE "TEGRA_UART"
  36. #define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
  37. #define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)
  38. #define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
  39. #define TEGRA_UART_LSR_TXFIFO_FULL 0x100
  40. #define TEGRA_UART_IER_EORD 0x20
  41. #define TEGRA_UART_MCR_RTS_EN 0x40
  42. #define TEGRA_UART_MCR_CTS_EN 0x20
  43. #define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
  44. UART_LSR_PE | UART_LSR_FE)
  45. #define TEGRA_UART_IRDA_CSR 0x08
  46. #define TEGRA_UART_SIR_ENABLED 0x80
  47. #define TEGRA_UART_TX_PIO 1
  48. #define TEGRA_UART_TX_DMA 2
  49. #define TEGRA_UART_MIN_DMA 16
  50. #define TEGRA_UART_FIFO_SIZE 32
  51. /*
  52. * Tx fifo trigger level setting in tegra uart is in
  53. * reverse way then conventional uart.
  54. */
  55. #define TEGRA_UART_TX_TRIG_16B 0x00
  56. #define TEGRA_UART_TX_TRIG_8B 0x10
  57. #define TEGRA_UART_TX_TRIG_4B 0x20
  58. #define TEGRA_UART_TX_TRIG_1B 0x30
  59. #define TEGRA_UART_MAXIMUM 5
  60. /* Default UART setting when started: 115200 no parity, stop, 8 data bits */
  61. #define TEGRA_UART_DEFAULT_BAUD 115200
  62. #define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8
  63. /* Tx transfer mode */
  64. #define TEGRA_TX_PIO 1
  65. #define TEGRA_TX_DMA 2
/**
 * struct tegra_uart_chip_data - SoC-specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx fifo reset with fifo mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source supports the clock divider
 *			(so the divisor latch can stay at 1).
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
};
/* Per-port driver state wrapping the serial-core uart_port. */
struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;	/* SoC quirks */

	struct clk *uart_clk;
	struct reset_control *rst;
	unsigned int current_baud;	/* 0 until a baud rate is programmed */

	/* Register shadow (registers are write-mostly; cache last value) */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;		/* RTS h/w flow control requested */

	int tx_in_progress;		/* 0, TEGRA_UART_TX_PIO or TEGRA_UART_TX_DMA */
	unsigned int tx_bytes;		/* bytes in the current Tx chunk */

	bool enable_modem_interrupt;
	bool rx_timeout;
	int rx_in_progress;		/* nonzero while Rx DMA is armed */
	int symb_bit;			/* bits per symbol; used for symbol-time delays */

	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	unsigned int tx_bytes_requested;	/* length handed to the Tx DMA */
	unsigned int rx_bytes_requested;	/* length handed to the Rx DMA */
};
  110. static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
  111. static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);
  112. static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
  113. unsigned long reg)
  114. {
  115. return readl(tup->uport.membase + (reg << tup->uport.regshift));
  116. }
  117. static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
  118. unsigned long reg)
  119. {
  120. writel(val, tup->uport.membase + (reg << tup->uport.regshift));
  121. }
/* Map a serial-core uart_port back to its containing tegra_uart_port. */
static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}
  126. static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
  127. {
  128. struct tegra_uart_port *tup = to_tegra_uport(u);
  129. /*
  130. * RI - Ring detector is active
  131. * CD/DCD/CAR - Carrier detect is always active. For some reason
  132. * linux has different names for carrier detect.
  133. * DSR - Data Set ready is active as the hardware doesn't support it.
  134. * Don't know if the linux support this yet?
  135. * CTS - Clear to send. Always set to active, as the hardware handles
  136. * CTS automatically.
  137. */
  138. if (tup->enable_modem_interrupt)
  139. return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
  140. return TIOCM_CTS;
  141. }
  142. static void set_rts(struct tegra_uart_port *tup, bool active)
  143. {
  144. unsigned long mcr;
  145. mcr = tup->mcr_shadow;
  146. if (active)
  147. mcr |= TEGRA_UART_MCR_RTS_EN;
  148. else
  149. mcr &= ~TEGRA_UART_MCR_RTS_EN;
  150. if (mcr != tup->mcr_shadow) {
  151. tegra_uart_write(tup, mcr, UART_MCR);
  152. tup->mcr_shadow = mcr;
  153. }
  154. }
  155. static void set_dtr(struct tegra_uart_port *tup, bool active)
  156. {
  157. unsigned long mcr;
  158. mcr = tup->mcr_shadow;
  159. if (active)
  160. mcr |= UART_MCR_DTR;
  161. else
  162. mcr &= ~UART_MCR_DTR;
  163. if (mcr != tup->mcr_shadow) {
  164. tegra_uart_write(tup, mcr, UART_MCR);
  165. tup->mcr_shadow = mcr;
  166. }
  167. }
  168. static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
  169. {
  170. struct tegra_uart_port *tup = to_tegra_uport(u);
  171. int dtr_enable;
  172. tup->rts_active = !!(mctrl & TIOCM_RTS);
  173. set_rts(tup, tup->rts_active);
  174. dtr_enable = !!(mctrl & TIOCM_DTR);
  175. set_dtr(tup, dtr_enable);
  176. }
  177. static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
  178. {
  179. struct tegra_uart_port *tup = to_tegra_uport(u);
  180. unsigned long lcr;
  181. lcr = tup->lcr_shadow;
  182. if (break_ctl)
  183. lcr |= UART_LCR_SBC;
  184. else
  185. lcr &= ~UART_LCR_SBC;
  186. tegra_uart_write(tup, lcr, UART_LCR);
  187. tup->lcr_shadow = lcr;
  188. }
  189. /**
  190. * tegra_uart_wait_cycle_time: Wait for N UART clock periods
  191. *
  192. * @tup: Tegra serial port data structure.
  193. * @cycles: Number of clock periods to wait.
  194. *
  195. * Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
  196. * clock speed is 16X the current baud rate.
  197. */
  198. static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
  199. unsigned int cycles)
  200. {
  201. if (tup->current_baud)
  202. udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
  203. }
  204. /* Wait for a symbol-time. */
  205. static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
  206. unsigned int syms)
  207. {
  208. if (tup->current_baud)
  209. udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
  210. tup->current_baud));
  211. }
/*
 * Clear the Rx and/or Tx FIFO (@fcr_bits selects UART_FCR_CLEAR_RCVR
 * and/or UART_FCR_CLEAR_XMIT). SoCs that do not allow clearing with
 * FIFO mode enabled (e.g. Tegra30, per the chip data) get the FIFO
 * disabled, cleared after a settle delay, then re-enabled.
 */
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/*
	 * For all tegra devices (up to t210), there is a hardware issue that
	 * requires software to wait for 32 UART clock periods for the flush
	 * to propagate, otherwise data could be lost.
	 */
	tegra_uart_wait_cycle_time(tup, 32);
}
/*
 * Program @baud into the controller. On SoCs whose UART clock source
 * supports a divider, the clock itself is set to 16x the baud rate and
 * the internal divisor latch stays at 1; otherwise the divisor is
 * computed from the fixed clock rate. Returns 0 on success or the
 * negative errno from clk_set_rate().
 */
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	int ret;

	/* Already running at this rate — nothing to do */
	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		divisor = 1;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	/*
	 * With DLAB set, the UART_TX/UART_IER offsets address the 16550
	 * divisor latch (DLL/DLM) rather than their usual registers.
	 */
	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);
	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);
	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
/*
 * Translate LSR error bits into the tty flag for the received character
 * and bump the matching error counter. Only the highest-priority error
 * is reported per call (overrun > parity > framing > break, via the
 * else-if chain). A break seen with no data ready but with a FIFO error
 * flag triggers an Rx FIFO reset.
 */
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
		unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrrun error */
			flag = TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag = TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			/* Framing error */
			flag = TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			dev_err(tup->uport.dev, "Got Break\n");
			tup->uport.icount.brk++;
			/* If FIFO read error without any data, reset Rx FIFO */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
		}
	}
	return flag;
}
  300. static int tegra_uart_request_port(struct uart_port *u)
  301. {
  302. return 0;
  303. }
  304. static void tegra_uart_release_port(struct uart_port *u)
  305. {
  306. /* Nothing to do here */
  307. }
  308. static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
  309. {
  310. struct circ_buf *xmit = &tup->uport.state->xmit;
  311. int i;
  312. for (i = 0; i < max_bytes; i++) {
  313. BUG_ON(uart_circ_empty(xmit));
  314. if (tup->cdata->tx_fifo_full_status) {
  315. unsigned long lsr = tegra_uart_read(tup, UART_LSR);
  316. if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
  317. break;
  318. }
  319. tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
  320. xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
  321. tup->uport.icount.tx++;
  322. }
  323. }
  324. static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
  325. unsigned int bytes)
  326. {
  327. if (bytes > TEGRA_UART_MIN_DMA)
  328. bytes = TEGRA_UART_MIN_DMA;
  329. tup->tx_in_progress = TEGRA_UART_TX_PIO;
  330. tup->tx_bytes = bytes;
  331. tup->ier_shadow |= UART_IER_THRI;
  332. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  333. }
/*
 * DMA-engine completion callback for Tx: advance the circular-buffer
 * tail by the bytes actually transferred (requested minus residue),
 * wake writers if room opened up, and kick off the next chunk. Runs
 * outside the port lock, so the lock is taken here before touching
 * shared Tx state.
 */
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
/*
 * Start a Tx DMA transfer from the current tail of the circular buffer.
 * The length is rounded down to a 16-byte multiple (the DMA burst
 * size); any remainder is sent later by PIO from start_next_tx().
 * Returns 0 on success, -EIO if no descriptor could be obtained.
 */
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	/* Hand the CPU-written buffer contents over to the device */
	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	/* Round down to the 16-byte DMA burst size */
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
			tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
  376. static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
  377. {
  378. unsigned long tail;
  379. unsigned long count;
  380. struct circ_buf *xmit = &tup->uport.state->xmit;
  381. tail = (unsigned long)&xmit->buf[xmit->tail];
  382. count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
  383. if (!count)
  384. return;
  385. if (count < TEGRA_UART_MIN_DMA)
  386. tegra_uart_start_pio_tx(tup, count);
  387. else if (BYTES_TO_ALIGN(tail) > 0)
  388. tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
  389. else
  390. tegra_uart_start_tx_dma(tup, count);
  391. }
  392. /* Called by serial core driver with u->lock taken. */
  393. static void tegra_uart_start_tx(struct uart_port *u)
  394. {
  395. struct tegra_uart_port *tup = to_tegra_uport(u);
  396. struct circ_buf *xmit = &u->state->xmit;
  397. if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
  398. tegra_uart_start_next_tx(tup);
  399. }
  400. static unsigned int tegra_uart_tx_empty(struct uart_port *u)
  401. {
  402. struct tegra_uart_port *tup = to_tegra_uport(u);
  403. unsigned int ret = 0;
  404. unsigned long flags;
  405. spin_lock_irqsave(&u->lock, flags);
  406. if (!tup->tx_in_progress) {
  407. unsigned long lsr = tegra_uart_read(tup, UART_LSR);
  408. if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
  409. ret = TIOCSER_TEMT;
  410. }
  411. spin_unlock_irqrestore(&u->lock, flags);
  412. return ret;
  413. }
/*
 * Serial-core hook to stop transmission. Only a DMA transfer needs
 * explicit cancellation: terminate it, account the bytes that did go
 * out into the circular buffer tail, and clear the in-progress state.
 * A PIO transfer requires no cleanup here.
 */
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned int count;

	if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
		return;

	dmaengine_terminate_all(tup->tx_dma_chan);
	/* Residue after termination tells how much actually transferred */
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
  429. static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
  430. {
  431. struct circ_buf *xmit = &tup->uport.state->xmit;
  432. tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
  433. tup->tx_in_progress = 0;
  434. if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
  435. uart_write_wakeup(&tup->uport);
  436. tegra_uart_start_next_tx(tup);
  437. }
  438. static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
  439. struct tty_port *tty)
  440. {
  441. do {
  442. char flag = TTY_NORMAL;
  443. unsigned long lsr = 0;
  444. unsigned char ch;
  445. lsr = tegra_uart_read(tup, UART_LSR);
  446. if (!(lsr & UART_LSR_DR))
  447. break;
  448. flag = tegra_uart_decode_rx_error(tup, lsr);
  449. ch = (unsigned char) tegra_uart_read(tup, UART_RX);
  450. tup->uport.icount.rx++;
  451. if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
  452. tty_insert_flip_char(tty, ch, flag);
  453. } while (1);
  454. }
/*
 * Copy @count bytes from the start of the Rx DMA buffer into the tty
 * flip buffer. The buffer is synced for the CPU before the copy and
 * handed back to the device afterwards so the next DMA transfer sees a
 * consistent view.
 *
 * NOTE(review): the post-copy sync uses DMA_TO_DEVICE on a buffer the
 * device writes into — looks intentional (re-arming the same mapping)
 * but worth confirming against the DMA-API rules.
 */
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
		struct tty_port *tty,
		unsigned int count)
{
	int copied;

	/* If count is zero, then there is no data to be copied */
	if (!count)
		return;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		/* tty buffer could not take everything; excess is dropped */
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}
  479. static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
  480. unsigned int residue)
  481. {
  482. struct tty_port *port = &tup->uport.state->port;
  483. struct tty_struct *tty = tty_port_tty_get(port);
  484. unsigned int count;
  485. async_tx_ack(tup->rx_dma_desc);
  486. count = tup->rx_bytes_requested - residue;
  487. /* If we are here, DMA is stopped */
  488. tegra_uart_copy_rx_to_tty(tup, port, count);
  489. tegra_uart_handle_rx_pio(tup, port);
  490. if (tty) {
  491. tty_flip_buffer_push(port);
  492. tty_kref_put(tty);
  493. }
  494. }
/*
 * DMA-engine completion callback for Rx. If the engine still reports
 * the transfer in progress, do nothing. Otherwise drop RTS (when RTS
 * flow control is active) to pause the sender, push the full buffer to
 * the tty layer, re-arm a fresh transfer, then raise RTS again.
 */
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Residue 0: the whole requested buffer completed */
	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}
/*
 * Called from the ISR on Rx timeout/end-of-data: stop the in-flight
 * DMA, deliver whatever arrived (residue tells how much was not
 * written), and re-arm a fresh transfer — pausing the sender via RTS
 * around the whole operation when RTS flow control is active.
 */
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
/*
 * Queue a full-buffer (TEGRA_UART_RX_DMA_BUFFER_SIZE) Rx DMA transfer;
 * tegra_uart_rx_dma_complete() or the ISR pushes the data to the tty
 * layer. Returns 0 on success, -EIO if no descriptor could be obtained.
 */
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
			tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	/*
	 * NOTE(review): DMA_TO_DEVICE on a device-written buffer — appears
	 * deliberate (hand the buffer back before re-arming); confirm
	 * against DMA-API documentation.
	 */
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				   count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}
  551. static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
  552. {
  553. struct tegra_uart_port *tup = to_tegra_uport(u);
  554. unsigned long msr;
  555. msr = tegra_uart_read(tup, UART_MSR);
  556. if (!(msr & UART_MSR_ANY_DELTA))
  557. return;
  558. if (msr & UART_MSR_TERI)
  559. tup->uport.icount.rng++;
  560. if (msr & UART_MSR_DDSR)
  561. tup->uport.icount.dsr++;
  562. /* We may only get DDCD when HW init and reset */
  563. if (msr & UART_MSR_DDCD)
  564. uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);
  565. /* Will start/stop_tx accordingly */
  566. if (msr & UART_MSR_DCTS)
  567. uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
  568. }
/*
 * Top-level interrupt handler. Loops on IIR until no interrupt is
 * pending. Rx-related sources (receive, end-of-data, Rx timeout) are
 * only flagged here, with all Rx interrupts masked; the actual DMA
 * drain runs once, on the final no-interrupt pass, after which the Rx
 * interrupts are unmasked again if reception is still active.
 */
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			/* All sources serviced; do the deferred Rx work */
			if (is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					/* Re-enable the Rx interrupts masked below */
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;
		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;
		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			}
			break;
		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;
		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
/*
 * Serial-core hook to stop reception: drop RTS to quiesce the sender,
 * wait one character time for in-flight data to land, mask all Rx
 * interrupts, terminate the Rx DMA, and push whatever it had already
 * received to the tty layer.
 */
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct dma_tx_state state;
	unsigned long ier;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
		TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
}
/*
 * Quiesce the controller before shutdown: mask interrupts, wait
 * (bounded by fifosize character times) for the Tx FIFO to drain, then
 * reset both FIFOs and gate the clock. If the remote end is holding us
 * off via CTS the drain can time out; this is only logged.
 */
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	/*
	 * ~one character time in microseconds (10000000/baud ≈ 10 bit
	 * periods — assumes roughly 10 bits per frame; TODO confirm).
	 * NOTE(review): divides by current_baud — relies on a baud rate
	 * having been programmed before deinit.
	 */
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			/* Poll in at most 100us steps until the budget runs out */
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
				    (msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	clk_disable_unprepare(tup->uart_clk);
}
  690. static int tegra_uart_hw_init(struct tegra_uart_port *tup)
  691. {
  692. int ret;
  693. tup->fcr_shadow = 0;
  694. tup->mcr_shadow = 0;
  695. tup->lcr_shadow = 0;
  696. tup->ier_shadow = 0;
  697. tup->current_baud = 0;
  698. clk_prepare_enable(tup->uart_clk);
  699. /* Reset the UART controller to clear all previous status.*/
  700. reset_control_assert(tup->rst);
  701. udelay(10);
  702. reset_control_deassert(tup->rst);
  703. tup->rx_in_progress = 0;
  704. tup->tx_in_progress = 0;
  705. /*
  706. * Set the trigger level
  707. *
  708. * For PIO mode:
  709. *
  710. * For receive, this will interrupt the CPU after that many number of
  711. * bytes are received, for the remaining bytes the receive timeout
  712. * interrupt is received. Rx high watermark is set to 4.
  713. *
  714. * For transmit, if the trasnmit interrupt is enabled, this will
  715. * interrupt the CPU when the number of entries in the FIFO reaches the
  716. * low watermark. Tx low watermark is set to 16 bytes.
  717. *
  718. * For DMA mode:
  719. *
  720. * Set the Tx trigger to 16. This should match the DMA burst size that
  721. * programmed in the DMA registers.
  722. */
  723. tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
  724. tup->fcr_shadow |= UART_FCR_R_TRIG_01;
  725. tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
  726. tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
  727. /* Dummy read to ensure the write is posted */
  728. tegra_uart_read(tup, UART_SCR);
  729. /*
  730. * For all tegra devices (up to t210), there is a hardware issue that
  731. * requires software to wait for 3 UART clock periods after enabling
  732. * the TX fifo, otherwise data could be lost.
  733. */
  734. tegra_uart_wait_cycle_time(tup, 3);
  735. /*
  736. * Initialize the UART with default configuration
  737. * (115200, N, 8, 1) so that the receive DMA buffer may be
  738. * enqueued
  739. */
  740. tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
  741. tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
  742. tup->fcr_shadow |= UART_FCR_DMA_SELECT;
  743. tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
  744. ret = tegra_uart_start_rx_dma(tup);
  745. if (ret < 0) {
  746. dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
  747. return ret;
  748. }
  749. tup->rx_in_progress = 1;
  750. /*
  751. * Enable IE_RXS for the receive status interrupts like line errros.
  752. * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
  753. *
  754. * If using DMA mode, enable EORD instead of receive interrupt which
  755. * will interrupt after the UART is done with the receive instead of
  756. * the interrupt when the FIFO "threshold" is reached.
  757. *
  758. * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
  759. * the DATA is sitting in the FIFO and couldn't be transferred to the
  760. * DMA as the DMA size alignment(4 bytes) is not met. EORD will be
  761. * triggered when there is a pause of the incomming data stream for 4
  762. * characters long.
  763. *
  764. * For pauses in the data which is not aligned to 4 bytes, we get
  765. * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
  766. * then the EORD.
  767. */
  768. tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
  769. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  770. return 0;
  771. }
  772. static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
  773. bool dma_to_memory)
  774. {
  775. if (dma_to_memory) {
  776. dmaengine_terminate_all(tup->rx_dma_chan);
  777. dma_release_channel(tup->rx_dma_chan);
  778. dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
  779. tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
  780. tup->rx_dma_chan = NULL;
  781. tup->rx_dma_buf_phys = 0;
  782. tup->rx_dma_buf_virt = NULL;
  783. } else {
  784. dmaengine_terminate_all(tup->tx_dma_chan);
  785. dma_release_channel(tup->tx_dma_chan);
  786. dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
  787. UART_XMIT_SIZE, DMA_TO_DEVICE);
  788. tup->tx_dma_chan = NULL;
  789. tup->tx_dma_buf_phys = 0;
  790. tup->tx_dma_buf_virt = NULL;
  791. }
  792. }
  793. static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
  794. bool dma_to_memory)
  795. {
  796. struct dma_chan *dma_chan;
  797. unsigned char *dma_buf;
  798. dma_addr_t dma_phys;
  799. int ret;
  800. struct dma_slave_config dma_sconfig;
  801. dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
  802. dma_to_memory ? "rx" : "tx");
  803. if (IS_ERR(dma_chan)) {
  804. ret = PTR_ERR(dma_chan);
  805. dev_err(tup->uport.dev,
  806. "DMA channel alloc failed: %d\n", ret);
  807. return ret;
  808. }
  809. if (dma_to_memory) {
  810. dma_buf = dma_alloc_coherent(tup->uport.dev,
  811. TEGRA_UART_RX_DMA_BUFFER_SIZE,
  812. &dma_phys, GFP_KERNEL);
  813. if (!dma_buf) {
  814. dev_err(tup->uport.dev,
  815. "Not able to allocate the dma buffer\n");
  816. dma_release_channel(dma_chan);
  817. return -ENOMEM;
  818. }
  819. dma_sconfig.src_addr = tup->uport.mapbase;
  820. dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  821. dma_sconfig.src_maxburst = 4;
  822. tup->rx_dma_chan = dma_chan;
  823. tup->rx_dma_buf_virt = dma_buf;
  824. tup->rx_dma_buf_phys = dma_phys;
  825. } else {
  826. dma_phys = dma_map_single(tup->uport.dev,
  827. tup->uport.state->xmit.buf, UART_XMIT_SIZE,
  828. DMA_TO_DEVICE);
  829. if (dma_mapping_error(tup->uport.dev, dma_phys)) {
  830. dev_err(tup->uport.dev, "dma_map_single tx failed\n");
  831. dma_release_channel(dma_chan);
  832. return -ENOMEM;
  833. }
  834. dma_buf = tup->uport.state->xmit.buf;
  835. dma_sconfig.dst_addr = tup->uport.mapbase;
  836. dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  837. dma_sconfig.dst_maxburst = 16;
  838. tup->tx_dma_chan = dma_chan;
  839. tup->tx_dma_buf_virt = dma_buf;
  840. tup->tx_dma_buf_phys = dma_phys;
  841. }
  842. ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
  843. if (ret < 0) {
  844. dev_err(tup->uport.dev,
  845. "Dma slave config failed, err = %d\n", ret);
  846. tegra_uart_dma_channel_free(tup, dma_to_memory);
  847. return ret;
  848. }
  849. return 0;
  850. }
  851. static int tegra_uart_startup(struct uart_port *u)
  852. {
  853. struct tegra_uart_port *tup = to_tegra_uport(u);
  854. int ret;
  855. ret = tegra_uart_dma_channel_allocate(tup, false);
  856. if (ret < 0) {
  857. dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
  858. return ret;
  859. }
  860. ret = tegra_uart_dma_channel_allocate(tup, true);
  861. if (ret < 0) {
  862. dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
  863. goto fail_rx_dma;
  864. }
  865. ret = tegra_uart_hw_init(tup);
  866. if (ret < 0) {
  867. dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
  868. goto fail_hw_init;
  869. }
  870. ret = request_irq(u->irq, tegra_uart_isr, 0,
  871. dev_name(u->dev), tup);
  872. if (ret < 0) {
  873. dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
  874. goto fail_hw_init;
  875. }
  876. return 0;
  877. fail_hw_init:
  878. tegra_uart_dma_channel_free(tup, true);
  879. fail_rx_dma:
  880. tegra_uart_dma_channel_free(tup, false);
  881. return ret;
  882. }
  883. /*
  884. * Flush any TX data submitted for DMA and PIO. Called when the
  885. * TX circular buffer is reset.
  886. */
  887. static void tegra_uart_flush_buffer(struct uart_port *u)
  888. {
  889. struct tegra_uart_port *tup = to_tegra_uport(u);
  890. tup->tx_bytes = 0;
  891. if (tup->tx_dma_chan)
  892. dmaengine_terminate_all(tup->tx_dma_chan);
  893. }
/*
 * tegra_uart_shutdown - uart_ops->shutdown: tear the port down.
 *
 * NOTE(review): ordering looks deliberate — the controller is deinited
 * before the DMA channels are freed and the IRQ is released last; keep
 * this sequence when modifying.
 */
static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);
	/* Mark both directions idle before releasing their DMA resources */
	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	tegra_uart_dma_channel_free(tup, true);
	tegra_uart_dma_channel_free(tup, false);
	free_irq(u->irq, tup);
}
  904. static void tegra_uart_enable_ms(struct uart_port *u)
  905. {
  906. struct tegra_uart_port *tup = to_tegra_uport(u);
  907. if (tup->enable_modem_interrupt) {
  908. tup->ier_shadow |= UART_IER_MSI;
  909. tegra_uart_write(tup, tup->ier_shadow, UART_IER);
  910. }
  911. }
/*
 * tegra_uart_set_termios - uart_ops->set_termios: apply a new line
 * configuration (parity, word length, stop bits, baud, flow control).
 *
 * Interrupts are masked and RTS is dropped (when auto-RTS is active)
 * while the configuration changes, then restored at the end. The port
 * lock is released around tegra_set_baudrate(); NOTE(review): that call
 * presumably may sleep (clock rate change) — confirm before holding the
 * lock across it.
 */
static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	/* Count of bit times per symbol: start bit + data + parity + stop */
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;

	/* Divider feeds a fixed /16 oversampling stage */
	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be change */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			/* Odd parity: parity on, even/sticky bits off */
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			/* Even parity */
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	/* Word length; default (incl. CS8) is 8 bits */
	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate, clamped to what the divider chain can produce */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	tegra_set_baudrate(tup, baud);
	/* Report the actually-programmed rate back to the tty layer */
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all write has completed */
	tegra_uart_read(tup, UART_IER);

	/* Reenable interrupt */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);
	spin_unlock_irqrestore(&u->lock, flags);
}
/* uart_ops->type: human-readable port type string shown to userspace. */
static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}
/* serial core operations implemented by this driver */
static const struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};
/* tty-level driver: ports appear as /dev/ttyTHS<n>; no console support */
static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= NULL,
	.nr		= TEGRA_UART_MAXIMUM,
};
  1039. static int tegra_uart_parse_dt(struct platform_device *pdev,
  1040. struct tegra_uart_port *tup)
  1041. {
  1042. struct device_node *np = pdev->dev.of_node;
  1043. int port;
  1044. port = of_alias_get_id(np, "serial");
  1045. if (port < 0) {
  1046. dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
  1047. return port;
  1048. }
  1049. tup->uport.line = port;
  1050. tup->enable_modem_interrupt = of_property_read_bool(np,
  1051. "nvidia,enable-modem-interrupt");
  1052. return 0;
  1053. }
/* Per-SoC quirks selected via the compatible string below */
static struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
};

static struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
};
/* DT match table; .data carries the per-SoC chip data */
static const struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);
  1075. static int tegra_uart_probe(struct platform_device *pdev)
  1076. {
  1077. struct tegra_uart_port *tup;
  1078. struct uart_port *u;
  1079. struct resource *resource;
  1080. int ret;
  1081. const struct tegra_uart_chip_data *cdata;
  1082. const struct of_device_id *match;
  1083. match = of_match_device(tegra_uart_of_match, &pdev->dev);
  1084. if (!match) {
  1085. dev_err(&pdev->dev, "Error: No device match found\n");
  1086. return -ENODEV;
  1087. }
  1088. cdata = match->data;
  1089. tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
  1090. if (!tup) {
  1091. dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
  1092. return -ENOMEM;
  1093. }
  1094. ret = tegra_uart_parse_dt(pdev, tup);
  1095. if (ret < 0)
  1096. return ret;
  1097. u = &tup->uport;
  1098. u->dev = &pdev->dev;
  1099. u->ops = &tegra_uart_ops;
  1100. u->type = PORT_TEGRA;
  1101. u->fifosize = 32;
  1102. tup->cdata = cdata;
  1103. platform_set_drvdata(pdev, tup);
  1104. resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1105. if (!resource) {
  1106. dev_err(&pdev->dev, "No IO memory resource\n");
  1107. return -ENODEV;
  1108. }
  1109. u->mapbase = resource->start;
  1110. u->membase = devm_ioremap_resource(&pdev->dev, resource);
  1111. if (IS_ERR(u->membase))
  1112. return PTR_ERR(u->membase);
  1113. tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
  1114. if (IS_ERR(tup->uart_clk)) {
  1115. dev_err(&pdev->dev, "Couldn't get the clock\n");
  1116. return PTR_ERR(tup->uart_clk);
  1117. }
  1118. tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
  1119. if (IS_ERR(tup->rst)) {
  1120. dev_err(&pdev->dev, "Couldn't get the reset\n");
  1121. return PTR_ERR(tup->rst);
  1122. }
  1123. u->iotype = UPIO_MEM32;
  1124. ret = platform_get_irq(pdev, 0);
  1125. if (ret < 0) {
  1126. dev_err(&pdev->dev, "Couldn't get IRQ\n");
  1127. return ret;
  1128. }
  1129. u->irq = ret;
  1130. u->regshift = 2;
  1131. ret = uart_add_one_port(&tegra_uart_driver, u);
  1132. if (ret < 0) {
  1133. dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
  1134. return ret;
  1135. }
  1136. return ret;
  1137. }
  1138. static int tegra_uart_remove(struct platform_device *pdev)
  1139. {
  1140. struct tegra_uart_port *tup = platform_get_drvdata(pdev);
  1141. struct uart_port *u = &tup->uport;
  1142. uart_remove_one_port(&tegra_uart_driver, u);
  1143. return 0;
  1144. }
  1145. #ifdef CONFIG_PM_SLEEP
  1146. static int tegra_uart_suspend(struct device *dev)
  1147. {
  1148. struct tegra_uart_port *tup = dev_get_drvdata(dev);
  1149. struct uart_port *u = &tup->uport;
  1150. return uart_suspend_port(&tegra_uart_driver, u);
  1151. }
  1152. static int tegra_uart_resume(struct device *dev)
  1153. {
  1154. struct tegra_uart_port *tup = dev_get_drvdata(dev);
  1155. struct uart_port *u = &tup->uport;
  1156. return uart_resume_port(&tegra_uart_driver, u);
  1157. }
  1158. #endif
/* PM ops; the macro expands to nothing when CONFIG_PM_SLEEP is off */
static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};
/* Platform driver glue binding the DT match table and PM ops */
static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name		= "serial-tegra",
		.of_match_table	= tegra_uart_of_match,
		.pm		= &tegra_uart_pm_ops,
	},
};
  1171. static int __init tegra_uart_init(void)
  1172. {
  1173. int ret;
  1174. ret = uart_register_driver(&tegra_uart_driver);
  1175. if (ret < 0) {
  1176. pr_err("Could not register %s driver\n",
  1177. tegra_uart_driver.driver_name);
  1178. return ret;
  1179. }
  1180. ret = platform_driver_register(&tegra_uart_platform_driver);
  1181. if (ret < 0) {
  1182. pr_err("Uart platform driver register failed, e = %d\n", ret);
  1183. uart_unregister_driver(&tegra_uart_driver);
  1184. return ret;
  1185. }
  1186. return 0;
  1187. }
/* Module exit: unregister in reverse order of registration. */
static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}
/* Module entry/exit points and metadata */
module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");