imx-mailbox.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
  4. * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
  5. */
  6. #include <linux/bitfield.h>
  7. #include <linux/clk.h>
  8. #include <linux/firmware/imx/ipc.h>
  9. #include <linux/firmware/imx/s4.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/io.h>
  12. #include <linux/iopoll.h>
  13. #include <linux/jiffies.h>
  14. #include <linux/kernel.h>
  15. #include <linux/mailbox_controller.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <linux/of_platform.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/suspend.h>
  22. #include <linux/slab.h>
  23. #include <linux/workqueue.h>
  24. #include "mailbox.h"
/* 24 generic channels: 6 channel types (see enum below) x 4 register indexes */
#define IMX_MU_CHANS		24
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS	6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS		2
#define IMX_MU_CHAN_NAME_SIZE	32

/* MU v2 parameter register: reports the number of TR/RR registers */
#define IMX_MU_V2_PAR_OFF	0x4
#define IMX_MU_V2_TR_MASK	GENMASK(7, 0)
#define IMX_MU_V2_RR_MASK	GENMASK(15, 8)

/* SECO transfers poll the hardware for up to 3 s per word */
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))

/* Please do not change the order of TX & RX: the values are used as irq[] indexes */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX		= 0, /* Tx */
	IMX_MU_TYPE_RX		= 1, /* Rx */
	IMX_MU_TYPE_TXDB	= 2, /* Tx doorbell */
	IMX_MU_TYPE_RXDB	= 3, /* Rx doorbell */
	IMX_MU_TYPE_RST		= 4, /* Reset */
	IMX_MU_TYPE_TXDB_V2	= 5, /* Tx doorbell with S/W ACK */
};
/* Logical indexes into imx_mu_dcfg.xCR[]; on MU v1 all map to one register */
enum imx_mu_xcr {
	IMX_MU_CR,	/* core control (reset etc.) */
	IMX_MU_GIER,	/* general-purpose interrupt enable */
	IMX_MU_GCR,	/* general-purpose interrupt request */
	IMX_MU_TCR,	/* transmit interrupt enable */
	IMX_MU_RCR,	/* receive interrupt enable */
	IMX_MU_xCR_MAX,
};

/* Logical indexes into imx_mu_dcfg.xSR[]; on MU v1 all map to one register */
enum imx_mu_xsr {
	IMX_MU_SR,	/* core status */
	IMX_MU_GSR,	/* general-purpose interrupt status */
	IMX_MU_TSR,	/* transmit status (TE bits) */
	IMX_MU_RSR,	/* receive status (RF bits) */
	IMX_MU_xSR_MAX,
};
/* Largest SCU RPC message: header plus up to 30 payload words */
struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[30];
};

/* Largest S4 (Sentinel) RPC message: header plus up to 254 payload words */
struct imx_s4_rpc_msg_max {
	struct imx_s4_rpc_msg hdr;
	u32 data[254];
};

/* Per-channel state */
struct imx_mu_con_priv {
	unsigned int		idx;		/* register index within the type (0-3) */
	char			irq_desc[IMX_MU_CHAN_NAME_SIZE];
	enum imx_mu_chan_type	type;
	struct mbox_chan	*chan;
	struct work_struct	txdb_work;	/* simulated txdone for TXDB channels */
};
/* Per-instance driver state */
struct imx_mu_priv {
	struct device		*dev;
	void __iomem		*base;		/* mapped MU register block */
	void			*msg;		/* RX bounce buffer for SCU/S4 messages */
	spinlock_t		xcr_lock; /* control register lock */

	struct mbox_controller	mbox;
	struct mbox_chan	mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv  con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg	*dcfg;
	struct clk		*clk;
	int			irq[IMX_MU_CHANS];	/* irq per channel type */
	bool			suspend;	/* NOTE(review): set by PM hooks outside this chunk;
						 * the ISR uses it to call pm_system_wakeup() */
	bool			side_b;		/* B side: skip A-side-owned register init */

	u32 xcr[IMX_MU_xCR_MAX];	/* control register save area (suspend/resume) */
	u32 num_tr;			/* number of transmit registers */
	u32 num_rr;			/* number of receive registers */
};
/* Hardware/protocol variant flags, OR-ed together in imx_mu_dcfg.type */
enum imx_mu_type {
	IMX_MU_V1,			/* original MU block */
	IMX_MU_V2 = BIT(1),		/* MU v2: split SR/CR registers, per-index bits */
	IMX_MU_V2_S4 = BIT(15),		/* S4/Sentinel RPC protocol */
	IMX_MU_V2_IRQ = BIT(16),	/* separate "tx"/"rx" interrupt lines */
};

/* Per-variant configuration: protocol callbacks plus register layout */
struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*init)(struct imx_mu_priv *priv);
	enum imx_mu_type type;
	u32	xTR;		/* Transmit Register0 */
	u32	xRR;		/* Receive Register0 */
	u32	xSR[IMX_MU_xSR_MAX];	/* Status Registers */
	u32	xCR[IMX_MU_xCR_MAX];	/* Control Registers */
};
/*
 * Bit positions differ between MU generations: v2 uses one bit per index
 * starting at bit 0, while v1 packs four reversed-order bits per function
 * into the shared SR/CR registers at fixed offsets.
 */
#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
/* MU reset */
#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
  123. static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
  124. {
  125. return container_of(mbox, struct imx_mu_priv, mbox);
  126. }
  127. static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
  128. {
  129. iowrite32(val, priv->base + offs);
  130. }
  131. static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
  132. {
  133. return ioread32(priv->base + offs);
  134. }
/*
 * Busy-wait until TX register @idx (modulo 4) reports transmit-empty, then
 * write @val into it.  Used by the SECO path where the remote consumes
 * words as they are sent.  Returns -ETIME if the TE flag never asserts
 * within IMX_MU_SECO_TX_TOUT.
 */
static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
{
	u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
	u32 status;
	u32 can_write;

	dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);

	/* Poll the transmit-empty flag of this register slot */
	do {
		status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
		can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
	} while (!can_write && time_is_after_jiffies64(timeout_time));

	if (!can_write) {
		dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
			val, idx, status);
		return -ETIME;
	}

	imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);

	return 0;
}
/*
 * Busy-wait until RX register @idx (modulo 4) reports receive-full, then
 * read it into @val.  Counterpart of imx_mu_tx_waiting_write() for the
 * SECO path.  Returns -ETIME if the RF flag never asserts within
 * IMX_MU_SECO_RX_TOUT.
 */
static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
{
	u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
	u32 status;
	u32 can_read;

	dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);

	/* Poll the receive-full flag of this register slot */
	do {
		status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
		can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
	} while (!can_read && time_is_after_jiffies64(timeout_time));

	if (!can_read) {
		dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
			idx, status);
		return -ETIME;
	}

	*val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
	dev_dbg(priv->dev, "Read %.8x\n", *val);

	return 0;
}
/*
 * Atomic read-modify-write of one control register: clear the bits in
 * @clr, set the bits in @set.  Serialized by xcr_lock because TX, RX and
 * doorbell paths update control bits concurrently (and on MU v1 they all
 * share a single register).  Returns the value written.
 */
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	val = imx_mu_read(priv, priv->dcfg->xCR[type]);
	val &= ~clr;
	val |= set;
	imx_mu_write(priv, val, priv->dcfg->xCR[type]);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return val;
}
/*
 * Generic TX path (no SCU/S4 protocol).
 *
 * TX:      write one word to this channel's TR register and enable the
 *          transmit-empty interrupt so the ISR can report txdone.
 * TXDB:    raise the general-purpose interrupt request toward the remote
 *          and queue txdb_work to simulate txdone (no hardware ACK).
 * TXDB_V2: raise GIRn and poll for the remote to take it (S/W ACK),
 *          retrying up to 10 times with a 10 ms timeout each.
 */
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp,
			     void *data)
{
	u32 *arg = data;
	u32 val;
	int ret, count;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
		queue_work(system_bh_wq, &cp->txdb_work);
		break;
	case IMX_MU_TYPE_TXDB_V2:
		imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
			     priv->dcfg->xCR[IMX_MU_GCR]);
		ret = -ETIMEDOUT;
		count = 0;
		/* GIRn stays set until the remote has accepted the doorbell */
		while (ret && (count < 10)) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
						 !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
						 0, 10000);

			if (ret) {
				dev_warn_ratelimited(priv->dev,
						     "channel type: %d timeout, %d times, retry\n",
						     cp->type, ++count);
			}
		}
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}
  223. static int imx_mu_generic_rx(struct imx_mu_priv *priv,
  224. struct imx_mu_con_priv *cp)
  225. {
  226. u32 dat;
  227. dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
  228. mbox_chan_received_data(cp->chan, (void *)&dat);
  229. return 0;
  230. }
  231. static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
  232. struct imx_mu_con_priv *cp)
  233. {
  234. imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
  235. priv->dcfg->xSR[IMX_MU_GSR]);
  236. mbox_chan_received_data(cp->chan, NULL);
  237. return 0;
  238. }
/*
 * SCU/S4 protocol TX: send a complete RPC message whose length, in u32
 * words, is carried in its own header.  The first num_tr words go
 * straight into the TR registers; each further word waits for the
 * transmit-empty flag of its slot, cycling through the TR registers
 * round-robin.
 */
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
	u32 *arg = data;
	u32 num_tr = priv->num_tr;
	int i, ret;
	u32 xsr;
	u32 size, max_size;

	/* The header layout depends on the protocol variant */
	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
	} else {
		size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/*
		 * msg->hdr.size specifies the number of u32 words while
		 * sizeof yields bytes.
		 */
		if (size > max_size / 4) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
			 */
			dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
			return -EINVAL;
		}

		/* Fill all available TR registers without waiting */
		for (i = 0; i < num_tr && i < size; i++)
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		/* Remaining words: wait for the slot to drain first */
		for (; i < size; i++) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
						 xsr,
						 xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
						 0, 5 * USEC_PER_SEC);
			if (ret) {
				dev_err(priv->dev, "Send data index: %d timeout\n", i);
				return ret;
			}
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		}

		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}
/*
 * SCU/S4 protocol RX: drain a complete RPC message into priv->msg.  The
 * receive interrupt is masked while reading and re-enabled afterwards.
 * Word 0 is the header carrying the message size; remaining words are
 * polled via their receive-full flags, cycling through the RR registers
 * round-robin, then the whole buffer is passed to the client.
 */
static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	u32 *data;
	int i, ret;
	u32 xsr;
	u32 size, max_size;
	u32 num_rr = priv->num_rr;

	data = (u32 *)priv->msg;

	/* Mask RX irq while we drain the registers by polling */
	imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);

	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
	} else {
		size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
	}

	/* hdr.size counts u32 words; reject sizes beyond the bounce buffer */
	if (size > max_size / 4) {
		dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
		return -EINVAL;
	}

	for (i = 1; i < size; i++) {
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
					 xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % num_rr), 0,
					 5 * USEC_PER_SEC);
		if (ret) {
			dev_err(priv->dev, "timeout read idx %d\n", i);
			return ret;
		}
		*data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % num_rr) * 4);
	}

	/* Re-enable the RX interrupt and deliver the message */
	imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
	mbox_chan_received_data(cp->chan, (void *)priv->msg);

	return 0;
}
/*
 * SECO TX: messages go over the TXDB channel.  The header word is written
 * first, the doorbell is rung so the remote starts consuming, the next
 * words fill the remaining mailbox registers, and the rest are written
 * one by one as the remote drains them (imx_mu_tx_waiting_write).
 * txdone is simulated via txdb_work since doorbells have no hardware ACK.
 */
static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
			  void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	u32 byte_size;
	int err;
	int i;

	dev_dbg(priv->dev, "Sending message\n");

	switch (cp->type) {
	case IMX_MU_TYPE_TXDB:
		byte_size = msg->hdr.size * sizeof(u32);
		if (byte_size > sizeof(*msg)) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max size
			 */
			dev_err(priv->dev,
				"Exceed max msg size (%zu) on TX, got: %i\n",
				sizeof(*msg), byte_size);
			return -EINVAL;
		}

		print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
				     data, byte_size, false);

		/* Send first word */
		dev_dbg(priv->dev, "Sending header\n");
		imx_mu_write(priv, *arg++, priv->dcfg->xTR);

		/* Send signaling */
		dev_dbg(priv->dev, "Sending signaling\n");
		imx_mu_xcr_rmw(priv, IMX_MU_GCR,
			       IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);

		/* Send words to fill the mailbox */
		for (i = 1; i < 4 && i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			imx_mu_write(priv, *arg++,
				     priv->dcfg->xTR + (i % 4) * 4);
		}

		/* Send rest of message waiting for remote read */
		for (; i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			err = imx_mu_tx_waiting_write(priv, *arg++, i);
			if (err) {
				dev_err(priv->dev, "Timeout tx %d\n", i);
				return err;
			}
		}

		/* Simulate hack for mbox framework */
		queue_work(system_bh_wq, &cp->txdb_work);
		break;
	default:
		dev_warn_ratelimited(priv->dev,
				     "Send data on wrong channel type: %d\n",
				     cp->type);
		return -EINVAL;
	}

	return 0;
}
/*
 * SECO RXDB: a doorbell from the remote announces a full RPC message.
 * Read the header to learn the size, pull the remaining words as they
 * become available, clear the pending GIP bit and forward the message —
 * or an ERR_PTR on failure — to the client.
 */
static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	u32 byte_size;
	int err = 0;
	int i;

	dev_dbg(priv->dev, "Receiving message\n");

	/* Read header */
	dev_dbg(priv->dev, "Receiving header\n");
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);
	byte_size = msg.hdr.size * sizeof(u32);
	if (byte_size > sizeof(msg)) {
		dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
			sizeof(msg), byte_size);
		err = -EINVAL;
		goto error;
	}

	/* Read message waiting they are written */
	for (i = 1; i < msg.hdr.size; i++) {
		dev_dbg(priv->dev, "Receiving word %d\n", i);
		err = imx_mu_rx_waiting_read(priv, data++, i);
		if (err) {
			dev_err(priv->dev, "Timeout rx %d\n", i);
			goto error;
		}
	}

	/* Clear GIP */
	imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
		     priv->dcfg->xSR[IMX_MU_GSR]);

	print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
			     &msg, byte_size, false);

	/* send data to client */
	dev_dbg(priv->dev, "Sending message to client\n");
	mbox_chan_received_data(cp->chan, (void *)&msg);

	goto exit;

error:
	/* Let the client see the failure instead of silently dropping it */
	mbox_chan_received_data(cp->chan, ERR_PTR(err));

exit:
	return err;
}
  421. static void imx_mu_txdb_work(struct work_struct *t)
  422. {
  423. struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work);
  424. mbox_chan_txdone(cp->chan, 0);
  425. }
/*
 * Interrupt handler, registered per channel (possibly on a shared line).
 * The pending status is ANDed with this channel's enable bit so only
 * interrupts we requested are handled: TX -> mask TIE and report txdone;
 * RX -> variant rx(); RXDB -> variant rxdb().  Triggers a system wakeup
 * when the MU fires during suspend.
 */
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
		/* only TE bits whose interrupt is actually enabled */
		val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
		val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
		val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		/* RST channels are polled, never interrupt-driven */
		return IRQ_NONE;
	default:
		dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
				     cp->type);
		return IRQ_NONE;
	}

	if (!val)
		return IRQ_NONE;

	if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
	    (cp->type == IMX_MU_TYPE_TX)) {
		/* TR slot drained: stop TE interrupts and complete the send */
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RX)) {
		priv->dcfg->rx(priv, cp);
	} else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RXDB)) {
		priv->dcfg->rxdb(priv, cp);
	} else {
		dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
		return IRQ_NONE;
	}

	if (priv->suspend)
		pm_system_wakeup();

	return IRQ_HANDLED;
}
  478. static int imx_mu_send_data(struct mbox_chan *chan, void *data)
  479. {
  480. struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
  481. struct imx_mu_con_priv *cp = chan->con_priv;
  482. return priv->dcfg->tx(priv, cp, data);
  483. }
/*
 * mbox_chan_ops.startup: power up the MU and, for irq-driven channel
 * types, install the handler and unmask the matching receive/doorbell
 * interrupt.  TXDB_V2 needs neither irq nor work item (S/W ACK); plain
 * TXDB only needs the txdone-simulation work item.
 */
static int imx_mu_startup(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	unsigned long irq_flag = 0;
	int ret;

	pm_runtime_get_sync(priv->dev);
	if (cp->type == IMX_MU_TYPE_TXDB_V2)
		return 0;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* Tx doorbell don't have ACK support */
		INIT_WORK(&cp->txdb_work, imx_mu_txdb_work);
		return 0;
	}

	/* IPC MU should be with IRQF_NO_SUSPEND set */
	if (!priv->dev->pm_domain)
		irq_flag |= IRQF_NO_SUSPEND;

	/* Without dedicated tx/rx lines all channels share one irq */
	if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
		irq_flag |= IRQF_SHARED;

	ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
		return ret;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		break;
	}

	return 0;
}
/*
 * mbox_chan_ops.shutdown: mirror of startup — mask the channel's
 * interrupt, release the irq (doorbell channels never requested one) and
 * drop the runtime PM reference.  A RST channel instead asserts the MU
 * reset bit and waits for the hardware to clear the reset status.
 */
static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	int ret;
	u32 sr;

	if (cp->type == IMX_MU_TYPE_TXDB_V2) {
		pm_runtime_put_sync(priv->dev);
		return;
	}

	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* make sure no simulated txdone is still in flight */
		cancel_work_sync(&cp->txdb_work);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		/* trigger the MU reset and wait for completion */
		imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
					 !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
		if (ret)
			dev_warn(priv->dev, "RST channel timeout\n");
		break;
	default:
		break;
	}

	free_irq(priv->irq[cp->type], chan);
	pm_runtime_put_sync(priv->dev);
}
/* Channel operations shared by all MU variants */
static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};
/*
 * #mbox-cells translation for SCU/S4 controllers.  Channel map:
 * 0 = TX0, 1 = RX0, 2.. = RXDB[idx].  TX/RX only exist with idx 0.
 */
static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
					       const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	switch (type) {
	case IMX_MU_TYPE_TX:
	case IMX_MU_TYPE_RX:
		/* non-zero idx is reported but tolerated: chan ignores it */
		if (idx != 0)
			dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
		chan = type;
		break;
	case IMX_MU_TYPE_RXDB:
		chan = 2 + idx;
		break;
	default:
		dev_err(mbox->dev, "Invalid chan type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}
/*
 * Generic #mbox-cells translation: chan = type * 4 + idx, matching the
 * layout built in imx_mu_init_generic().  TXDB_V2 channels switch to
 * TXDONE_BY_ACK since their completion is signalled in the tx hook.
 */
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
				       const struct of_phandle_args *sp)
{
	struct mbox_chan *p_chan;
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	/* RST only supports 1 channel */
	if ((type == IMX_MU_TYPE_RST) && idx) {
		dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	chan = type * 4 + idx;
	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	p_chan = &mbox->chans[chan];

	if (type == IMX_MU_TYPE_TXDB_V2)
		p_chan->txdone_method = TXDONE_BY_ACK;

	return p_chan;
}
  619. static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
  620. const struct of_phandle_args *sp)
  621. {
  622. u32 type;
  623. if (sp->args_count < 1) {
  624. dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
  625. return ERR_PTR(-EINVAL);
  626. }
  627. type = sp->args[0]; /* channel type */
  628. /* Only supports TXDB and RXDB */
  629. if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
  630. dev_err(mbox->dev, "Invalid type: %d\n", type);
  631. return ERR_PTR(-EINVAL);
  632. }
  633. return imx_mu_xlate(mbox, sp);
  634. }
  635. static void imx_mu_get_tr_rr(struct imx_mu_priv *priv)
  636. {
  637. u32 val;
  638. if (priv->dcfg->type & IMX_MU_V2) {
  639. val = imx_mu_read(priv, IMX_MU_V2_PAR_OFF);
  640. priv->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val);
  641. priv->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val);
  642. } else {
  643. priv->num_tr = 4;
  644. priv->num_rr = 4;
  645. }
  646. }
/*
 * Generic init: lay out 24 channels as type = i / 4, idx = i % 4 (the
 * inverse of imx_mu_xlate()'s chan = type * 4 + idx).  Register cleanup
 * is skipped on the B side, which must not touch A-side-owned state.
 */
static int imx_mu_init_generic(struct imx_mu_priv *priv)
{
	unsigned int i;
	unsigned int val;

	/* the fixed 4-slot-per-type layout cannot address more registers */
	if (priv->num_rr > 4 || priv->num_tr > 4) {
		WARN_ONCE(true, "%s not support TR/RR larger than 4\n", __func__);
		return -EOPNOTSUPP;
	}

	for (i = 0; i < IMX_MU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i % 4;
		cp->type = i >> 2;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_CHANS;
	priv->mbox.of_xlate = imx_mu_xlate;

	if (priv->side_b)
		return 0;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);

	/* Clear any pending GIP */
	val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
	imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);

	/* Clear any pending RSR */
	for (i = 0; i < priv->num_rr; i++)
		imx_mu_read(priv, priv->dcfg->xRR + i * 4);

	return 0;
}
/*
 * SCU/S4 init: small fixed channel set — chan0 = TX0, chan1 = RX0 and,
 * on SCU only, chan2..5 = RXDB[0..3] (S4 has just the two).
 */
static int imx_mu_init_specific(struct imx_mu_priv *priv)
{
	unsigned int i;
	int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;

	for (i = 0; i < num_chans; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i < 2 ? 0 : i - 2;
		cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
	}

	priv->mbox.num_chans = num_chans;
	priv->mbox.of_xlate = imx_mu_specific_xlate;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);

	return 0;
}
  699. static int imx_mu_init_seco(struct imx_mu_priv *priv)
  700. {
  701. int ret;
  702. ret = imx_mu_init_generic(priv);
  703. if (ret)
  704. return ret;
  705. priv->mbox.of_xlate = imx_mu_seco_xlate;
  706. return 0;
  707. }
/*
 * Probe: map the MU, resolve interrupts (dedicated "tx"/"rx" lines for
 * IMX_MU_V2_IRQ parts, otherwise one line shared by all channel types),
 * allocate the RX bounce buffer, run the variant init and register the
 * controller.  The clock is only held across probe; once runtime PM is
 * up it owns the clock.
 */
static int imx_mu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_mu_priv *priv;
	const struct imx_mu_dcfg *dcfg;
	int i, ret;
	u32 size;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	dcfg = of_device_get_match_data(dev);
	if (!dcfg)
		return -EINVAL;
	priv->dcfg = dcfg;
	if (priv->dcfg->type & IMX_MU_V2_IRQ) {
		/* separate tx/rx interrupt lines */
		priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
		if (priv->irq[IMX_MU_TYPE_TX] < 0)
			return priv->irq[IMX_MU_TYPE_TX];
		priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
		if (priv->irq[IMX_MU_TYPE_RX] < 0)
			return priv->irq[IMX_MU_TYPE_RX];
	} else {
		/* one combined line shared by every channel type */
		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;

		for (i = 0; i < IMX_MU_CHANS; i++)
			priv->irq[i] = ret;
	}

	/* size the RX bounce buffer for the protocol's largest message */
	if (priv->dcfg->type & IMX_MU_V2_S4)
		size = sizeof(struct imx_s4_rpc_msg_max);
	else
		size = sizeof(struct imx_sc_rpc_msg_max);

	priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!priv->msg)
		return -ENOMEM;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		/* the clock is optional: tolerate only -ENOENT */
		if (PTR_ERR(priv->clk) != -ENOENT)
			return PTR_ERR(priv->clk);

		priv->clk = NULL;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	imx_mu_get_tr_rr(priv);

	priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");

	ret = priv->dcfg->init(priv);
	if (ret) {
		dev_err(dev, "Failed to init MU\n");
		goto disable_clk;
	}

	spin_lock_init(&priv->xcr_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &imx_mu_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = devm_mbox_controller_register(dev, &priv->mbox);
	if (ret)
		goto disable_clk;

	of_platform_populate(dev->of_node, NULL, NULL, dev);

	pm_runtime_enable(dev);

	/* cycle runtime PM once so the device ends up runtime-suspended */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	/* from here on runtime PM owns the clock */
	clk_disable_unprepare(priv->clk);

	return 0;

disable_runtime_pm:
	pm_runtime_disable(dev);
disable_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
  791. static void imx_mu_remove(struct platform_device *pdev)
  792. {
  793. struct imx_mu_priv *priv = platform_get_drvdata(pdev);
  794. pm_runtime_disable(priv->dev);
  795. }
/*
 * i.MX6SX MU configuration: generic TX/RX/doorbell ops.  All status and
 * control entries point at a single shared register offset, matching the
 * original (pre-V2) MU register layout.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x0,	/* transmit register base */
	.xRR	= 0x10,	/* receive register base */
	.xSR	= {0x20, 0x20, 0x20, 0x20},		/* one shared status register */
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},	/* one shared control register */
};
/*
 * i.MX7ULP MU configuration: same generic ops and shared SR/CR layout as
 * i.MX6SX, just at different base offsets.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x20,	/* transmit register base */
	.xRR	= 0x40,	/* receive register base */
	.xSR	= {0x60, 0x60, 0x60, 0x60},		/* one shared status register */
	.xCR	= {0x64, 0x64, 0x64, 0x64, 0x64},	/* one shared control register */
};
/*
 * i.MX8ULP MU configuration: generic ops on the V2 IP, which splits the
 * status and control functions across distinct per-index registers
 * (note the differing xSR/xCR offsets, unlike the V1 tables above).
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.type	= IMX_MU_V2,
	.xTR	= 0x200,	/* transmit register base */
	.xRR	= 0x280,	/* receive register base */
	.xSR	= {0xC, 0x118, 0x124, 0x12C},		/* per-index status registers */
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},	/* per-index control registers */
};
/*
 * i.MX8ULP MU towards the Sentinel/S401 firmware: V2 register layout
 * with the "specific" (firmware-protocol-aware) TX/RX ops.  The
 * IMX_MU_V2_S4 flag makes probe size the message buffer with
 * struct imx_s4_rpc_msg_max instead of the SCU message size.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};
/*
 * i.MX93 MU towards the S4/ELE firmware: same layout and ops as the
 * 8ULP S4 config, plus IMX_MU_V2_IRQ so probe looks up separate
 * "tx"/"rx" named interrupts instead of a single combined one.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};
/*
 * i.MX8/8X MU towards the SCU firmware: protocol-aware TX/RX on the V1
 * (shared SR/CR) register layout, with the generic doorbell handler.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.rxdb	= imx_mu_generic_rxdb,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};
/*
 * i.MX8 MU towards the SECO firmware: SECO-specific TX and doorbell
 * handling with generic RX, on the V1 (shared SR/CR) register layout.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
	.tx	= imx_mu_seco_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_seco_rxdb,
	.init	= imx_mu_init_seco,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};
/*
 * Device-tree match table.  .data selects the per-SoC register layout
 * and mailbox ops (struct imx_mu_dcfg) that probe attaches to the device.
 */
static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
	{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
	/* i.MX95 MUs reuse the i.MX8ULP configurations. */
	{ .compatible = "fsl,imx95-mu", .data = &imx_mu_cfg_imx8ulp },
	{ .compatible = "fsl,imx95-mu-ele", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx95-mu-v2x", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
	{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
/*
 * System-sleep (noirq phase) suspend handler.
 *
 * Snapshot every MU control register into priv->xcr[] so that
 * imx_mu_resume_noirq() can restore them if the hardware context is
 * lost across suspend.  The save is done only when the MU has no clock
 * (priv->clk == NULL) — NOTE(review): presumably the clocked variants
 * keep/rebuild their context elsewhere; confirm against the resume path
 * and the SoC reference manual.
 */
static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	if (!priv->clk) {
		/* Save all control registers for possible restore on resume. */
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
	}

	/* Mark the in-suspend window for the rest of the driver. */
	priv->suspend = true;

	return 0;
}
/*
 * System-sleep (noirq phase) resume handler: conditionally restore the
 * control registers saved by imx_mu_suspend_noirq() and clear the
 * suspend flag.
 */
static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	/*
	 * ONLY restore MU when context lost, the TIE could
	 * be set during noirq resume as there is MU data
	 * communication going on, and restore the saved
	 * value will overwrite the TIE and cause MU data
	 * send failed, may lead to system freeze. This issue
	 * is observed by testing freeze mode suspend.
	 */
	/* xCR[0] reading back as 0 is taken to mean "context was lost". */
	if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
	}

	priv->suspend = false;

	return 0;
}
  911. static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
  912. {
  913. struct imx_mu_priv *priv = dev_get_drvdata(dev);
  914. clk_disable_unprepare(priv->clk);
  915. return 0;
  916. }
  917. static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
  918. {
  919. struct imx_mu_priv *priv = dev_get_drvdata(dev);
  920. int ret;
  921. ret = clk_prepare_enable(priv->clk);
  922. if (ret)
  923. dev_err(dev, "failed to enable clock\n");
  924. return ret;
  925. }
/*
 * PM callbacks: system-sleep handlers run in the noirq phase (so the MU
 * is quiesced after/before interrupt handlers), runtime-PM handlers
 * gate the optional MU clock.
 */
static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};
static struct platform_driver imx_mu_driver = {
	.probe		= imx_mu_probe,
	.remove_new	= imx_mu_remove,	/* void-returning remove callback */
	.driver = {
		.name = "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");