/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}
#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
			      struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif
static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port,
							errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}
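
/*
 * ssi_claim_lch - reserve a free GDD logical channel for @msg.
 * Returns the claimed channel index, or -EBUSY when all
 * SSI_MAX_GDD_LCH channels are already carrying a transfer.
 */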
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}
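
/*
 * ssi_start_dma - program GDD logical channel @lch for @msg and kick it
 * off: map the scatterlist, set up source/destination addresses, burst
 * mode and hardware sync, then enable the channel. A runtime PM
 * reference is held for the duration of the transfer and @msg is
 * marked HSI_STATUS_PROCEEDING.
 */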
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
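
/*
 * ssi_start_pio - start a PIO transfer for @msg by arming the matching
 * DATAACCEPT/DATAAVAILABLE interrupt; the actual data is moved word by
 * word in ssi_pio_complete(). Writes keep an extra clock reference
 * until the transfer completes.
 */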
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
					msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
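
/*
 * ssi_start_transfer - start the first queued message on @queue.
 * Transfers longer than one 32-bit word go through the GDD DMA engine
 * when a logical channel is free; everything else falls back to PIO.
 * Called with omap_port->lock held.
 */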
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}
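
/*
 * ssi_calculate_div - compute the SST divisor for the requested
 * max_speed. The TX bit clock is half the functional clock, and the
 * rate is decremented first so that an exact multiple still rounds
 * down to the next lower divisor.
 */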
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}
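
/*
 * ssi_setup - apply a client's TX/RX configuration to the port. Both
 * modules are put to sleep while the framesize, divisor, channel count,
 * arbitration and mode registers are rewritten; shadow copies are kept
 * for restoring the context after OFF mode.
 */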
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}
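
/*
 * ssi_flush - abort all ongoing communication on the port: stop DMA
 * and PIO transfers, flush the FIFOs, ack pending errors and breaks,
 * clear interrupts and drop every queued request. SSI I/O is parked on
 * the idle pinctrl state meanwhile to avoid racing incoming frames.
 */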
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}
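
/*
 * ssi_transfer - (re)start transfers on @queue. Messages that fail to
 * start are completed with HSI_STATUS_ERROR so the queue keeps
 * draining until one transfer starts successfully or the queue is
 * empty.
 */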
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}
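
/*
 * ssi_cleanup_queues - drop all queued messages belonging to @cl,
 * releasing clock references held by in-flight writes, clearing the
 * affected TX/RX buffer state and disarming/acking the interrupts
 * associated with those transfers.
 */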
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references for writes, including GDD ones */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
			SSI_MPU_STATUS_REG(port->num, 0));
}
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
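
/*
 * ssi_error - handle an SSI error interrupt: ack the error, cancel all
 * GDD and PIO read transfers on the port and complete the pending read
 * requests with HSI_STATUS_ERROR before restarting any queued reads.
 */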
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}
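
/*
 * ssi_pio_complete - service a DATAACCEPT/DATAAVAILABLE interrupt for
 * the first message on @queue: move one 32-bit word to or from the
 * channel buffer, and once the last written frame has really been
 * sent, complete the message and restart the queue.
 */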
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}
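
/*
 * ssi_wake_thread - handle CAWAKE edges: take or drop the clock
 * reference tracked by the SSI_WAKE_EN flag and forward
 * HSI_EVENT_START_RX / HSI_EVENT_STOP_RX to the clients.
 */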
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}
static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
			ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}
static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}
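
/*
 * ssi_port_probe - bind the port to the first unclaimed slot of the
 * parent controller, map the SST/SSR register regions, request the
 * port and CAWAKE IRQs, install the HSI port callbacks and enable
 * autosuspended runtime PM.
 */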
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}
static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}
static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}
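
/*
 * omap_ssi_port_update_fclk - recompute and program the TX divisor
 * after a functional clock rate change, keeping the shadow copy in
 * sync for context restore.
 */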
void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
			   omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe	= ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name		= "omap_ssi_port",
		.of_match_table	= omap_ssi_port_of_match,
		.pm		= DEV_PM_OPS,
	},
};