// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
 * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/s4.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "mailbox.h"

#define IMX_MU_CHANS		24
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS	6
/* TX0/RX0 */
#define IMX_MU_S4_CHANS		2
#define IMX_MU_CHAN_NAME_SIZE	32

#define IMX_MU_V2_PAR_OFF	0x4
#define IMX_MU_V2_TR_MASK	GENMASK(7, 0)
#define IMX_MU_V2_RR_MASK	GENMASK(15, 8)

#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))

/* Please do not change the order of TX & RX */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX		= 0, /* Tx */
	IMX_MU_TYPE_RX		= 1, /* Rx */
	IMX_MU_TYPE_TXDB	= 2, /* Tx doorbell */
	IMX_MU_TYPE_RXDB	= 3, /* Rx doorbell */
	IMX_MU_TYPE_RST		= 4, /* Reset */
	IMX_MU_TYPE_TXDB_V2	= 5, /* Tx doorbell with S/W ACK */
};

enum imx_mu_xcr {
	IMX_MU_CR,
	IMX_MU_GIER,
	IMX_MU_GCR,
	IMX_MU_TCR,
	IMX_MU_RCR,
	IMX_MU_xCR_MAX,
};

enum imx_mu_xsr {
	IMX_MU_SR,
	IMX_MU_GSR,
	IMX_MU_TSR,
	IMX_MU_RSR,
	IMX_MU_xSR_MAX,
};

struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[30];
};

struct imx_s4_rpc_msg_max {
	struct imx_s4_rpc_msg hdr;
	u32 data[254];
};

struct imx_mu_con_priv {
	unsigned int		idx;
	char			irq_desc[IMX_MU_CHAN_NAME_SIZE];
	enum imx_mu_chan_type	type;
	struct mbox_chan	*chan;
	struct work_struct	txdb_work;
};

struct imx_mu_priv {
	struct device		*dev;
	void __iomem		*base;
	void			*msg;
	spinlock_t		xcr_lock; /* control register lock */

	struct mbox_controller	mbox;
	struct mbox_chan	mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv	con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg	*dcfg;
	struct clk		*clk;
	int			irq[IMX_MU_CHANS];
	bool			suspend;
	bool			side_b;

	u32			xcr[IMX_MU_xCR_MAX];
	u32			num_tr;
	u32			num_rr;
};

enum imx_mu_type {
	IMX_MU_V1,
	IMX_MU_V2 = BIT(1),
	IMX_MU_V2_S4 = BIT(15),
	IMX_MU_V2_IRQ = BIT(16),
};

struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*init)(struct imx_mu_priv *priv);
	enum imx_mu_type type;
	u32 xTR;		/* Transmit Register0 */
	u32 xRR;		/* Receive Register0 */
	u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */
	u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */
};

#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
/* MU reset */
#define IMX_MU_xCR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(5))
#define IMX_MU_xSR_RST(type) (type & IMX_MU_V2 ? BIT(0) : BIT(7))
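
/*
 * Note on the macros above: MU V2 instances have a dedicated status/control
 * register per function, with channel n simply at BIT(n). V1 instances
 * multiplex all four channels of every function into a single SR/CR pair
 * (see the identical xSR/xCR offsets in the V1 dcfg tables below), packed
 * from the high bits downwards, e.g. channel n's TE bit sits at
 * bit 20 + (3 - n).
 */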

static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
	return container_of(mbox, struct imx_mu_priv, mbox);
}

static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
{
	iowrite32(val, priv->base + offs);
}

static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
{
	return ioread32(priv->base + offs);
}

static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
{
	u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
	u32 status;
	u32 can_write;

	dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);

	do {
		status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
		can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
	} while (!can_write && time_is_after_jiffies64(timeout_time));

	if (!can_write) {
		dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
			val, idx, status);
		return -ETIME;
	}

	imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);

	return 0;
}

static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
{
	u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
	u32 status;
	u32 can_read;

	dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);

	do {
		status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
		can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
	} while (!can_read && time_is_after_jiffies64(timeout_time));

	if (!can_read) {
		dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
			idx, status);
		return -ETIME;
	}

	*val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
	dev_dbg(priv->dev, "Read %.8x\n", *val);

	return 0;
}
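
/*
 * The control registers are shared between channel types (and, on V1, even
 * between functions), so every update goes through a read-modify-write under
 * xcr_lock to avoid clobbering bits owned by another channel.
 */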
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	val = imx_mu_read(priv, priv->dcfg->xCR[type]);
	val &= ~clr;
	val |= set;
	imx_mu_write(priv, val, priv->dcfg->xCR[type]);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return val;
}

static int imx_mu_generic_tx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp,
			     void *data)
{
	u32 *arg = data;
	u32 val;
	int ret;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
		queue_work(system_bh_wq, &cp->txdb_work);
		break;
	case IMX_MU_TYPE_TXDB_V2:
		imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
			     priv->dcfg->xCR[IMX_MU_GCR]);
		ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
					 !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
					 0, 1000);
		if (ret)
			dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

static int imx_mu_generic_rx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp)
{
	u32 dat;

	dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
	mbox_chan_received_data(cp->chan, (void *)&dat);

	return 0;
}

static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
			       struct imx_mu_con_priv *cp)
{
	imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
		     priv->dcfg->xSR[IMX_MU_GSR]);
	mbox_chan_received_data(cp->chan, NULL);

	return 0;
}
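
/*
 * The "specific" tx/rx paths implement the SCU/S4 RPC transport: the first
 * u32 of every message is a header whose size field gives the message length
 * in u32 words. Words beyond the available transmit registers are written
 * only once the remote side has drained a register (TE set again), polled
 * below via readl_poll_timeout().
 */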
static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
	u32 *arg = data;
	u32 num_tr = priv->num_tr;
	int i, ret;
	u32 xsr;
	u32 size, max_size;

	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
	} else {
		size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/*
		 * msg->hdr.size specifies the number of u32 words while
		 * sizeof yields bytes.
		 */
		if (size > max_size / 4) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
			 */
			dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
			return -EINVAL;
		}

		for (i = 0; i < num_tr && i < size; i++)
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		for (; i < size; i++) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
						 xsr,
						 xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
						 0, 5 * USEC_PER_SEC);
			if (ret) {
				dev_err(priv->dev, "Send data index: %d timeout\n", i);
				return ret;
			}
			imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
		}

		imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	u32 *data;
	int i, ret;
	u32 xsr;
	u32 size, max_size;
	u32 num_rr = priv->num_rr;

	data = (u32 *)priv->msg;

	imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);

	if (priv->dcfg->type & IMX_MU_V2_S4) {
		size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_s4_rpc_msg_max);
	} else {
		size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
		max_size = sizeof(struct imx_sc_rpc_msg_max);
	}

	if (size > max_size / 4) {
		dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
		return -EINVAL;
	}

	for (i = 1; i < size; i++) {
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
					 xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % num_rr), 0,
					 5 * USEC_PER_SEC);
		if (ret) {
			dev_err(priv->dev, "timeout read idx %d\n", i);
			return ret;
		}
		*data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % num_rr) * 4);
	}

	imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
	mbox_chan_received_data(cp->chan, (void *)priv->msg);

	return 0;
}
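
/*
 * SECO messages travel over the doorbell pair: TX writes the RPC header,
 * raises the general-purpose interrupt towards the remote, fills whatever
 * fits in the four transmit registers and then feeds the rest through
 * imx_mu_tx_waiting_write(); imx_mu_seco_rxdb() mirrors that on the
 * receive side.
 */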
static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
			  void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	u32 byte_size;
	int err;
	int i;

	dev_dbg(priv->dev, "Sending message\n");

	switch (cp->type) {
	case IMX_MU_TYPE_TXDB:
		byte_size = msg->hdr.size * sizeof(u32);
		if (byte_size > sizeof(*msg)) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max size
			 */
			dev_err(priv->dev,
				"Exceed max msg size (%zu) on TX, got: %i\n",
				sizeof(*msg), byte_size);
			return -EINVAL;
		}

		print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
				     data, byte_size, false);

		/* Send first word */
		dev_dbg(priv->dev, "Sending header\n");
		imx_mu_write(priv, *arg++, priv->dcfg->xTR);

		/* Send signaling */
		dev_dbg(priv->dev, "Sending signaling\n");
		imx_mu_xcr_rmw(priv, IMX_MU_GCR,
			       IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);

		/* Send words to fill the mailbox */
		for (i = 1; i < 4 && i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			imx_mu_write(priv, *arg++,
				     priv->dcfg->xTR + (i % 4) * 4);
		}

		/* Send rest of message waiting for remote read */
		for (; i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			err = imx_mu_tx_waiting_write(priv, *arg++, i);
			if (err) {
				dev_err(priv->dev, "Timeout tx %d\n", i);
				return err;
			}
		}

		/* Simulate a tx done ACK for the mbox framework (hack) */
		queue_work(system_bh_wq, &cp->txdb_work);
		break;
	default:
		dev_warn_ratelimited(priv->dev,
				     "Send data on wrong channel type: %d\n",
				     cp->type);
		return -EINVAL;
	}

	return 0;
}

static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	u32 byte_size;
	int err = 0;
	int i;

	dev_dbg(priv->dev, "Receiving message\n");

	/* Read header */
	dev_dbg(priv->dev, "Receiving header\n");
	*data++ = imx_mu_read(priv, priv->dcfg->xRR);
	byte_size = msg.hdr.size * sizeof(u32);
	if (byte_size > sizeof(msg)) {
		dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
			sizeof(msg), byte_size);
		err = -EINVAL;
		goto error;
	}

	/* Read the rest of the message, waiting for each word to be written */
	for (i = 1; i < msg.hdr.size; i++) {
		dev_dbg(priv->dev, "Receiving word %d\n", i);
		err = imx_mu_rx_waiting_read(priv, data++, i);
		if (err) {
			dev_err(priv->dev, "Timeout rx %d\n", i);
			goto error;
		}
	}

	/* Clear GIP */
	imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
		     priv->dcfg->xSR[IMX_MU_GSR]);

	print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
			     &msg, byte_size, false);

	/* send data to client */
	dev_dbg(priv->dev, "Sending message to client\n");
	mbox_chan_received_data(cp->chan, (void *)&msg);

	goto exit;

error:
	mbox_chan_received_data(cp->chan, ERR_PTR(err));

exit:
	return err;
}

static void imx_mu_txdb_work(struct work_struct *t)
{
	struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work);

	mbox_chan_txdone(cp->chan, 0);
}

static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
		val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
		val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
		val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
		val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
			(ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		return IRQ_NONE;
	default:
		dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
				     cp->type);
		return IRQ_NONE;
	}

	if (!val)
		return IRQ_NONE;

	if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
	    (cp->type == IMX_MU_TYPE_TX)) {
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RX)) {
		priv->dcfg->rx(priv, cp);
	} else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
		   (cp->type == IMX_MU_TYPE_RXDB)) {
		priv->dcfg->rxdb(priv, cp);
	} else {
		dev_warn_ratelimited(priv->dev, "Unhandled interrupt\n");
		return IRQ_NONE;
	}

	if (priv->suspend)
		pm_system_wakeup();

	return IRQ_HANDLED;
}

static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	return priv->dcfg->tx(priv, cp, data);
}

static int imx_mu_startup(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	unsigned long irq_flag = 0;
	int ret;

	pm_runtime_get_sync(priv->dev);
	if (cp->type == IMX_MU_TYPE_TXDB_V2)
		return 0;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* Tx doorbell channels have no ACK support */
		INIT_WORK(&cp->txdb_work, imx_mu_txdb_work);
		return 0;
	}

	/* An IPC MU IRQ should be requested with IRQF_NO_SUSPEND set */
	if (!priv->dev->pm_domain)
		irq_flag |= IRQF_NO_SUSPEND;

	if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
		irq_flag |= IRQF_SHARED;

	ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
		return ret;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
		break;
	default:
		break;
	}

	return 0;
}

static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	int ret;
	u32 sr;

	if (cp->type == IMX_MU_TYPE_TXDB_V2) {
		pm_runtime_put_sync(priv->dev);
		return;
	}

	if (cp->type == IMX_MU_TYPE_TXDB) {
		cancel_work_sync(&cp->txdb_work);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
		break;
	case IMX_MU_TYPE_RST:
		imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
					 !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
		if (ret)
			dev_warn(priv->dev, "RST channel timeout\n");
		break;
	default:
		break;
	}

	free_irq(priv->irq[cp->type], chan);
	pm_runtime_put_sync(priv->dev);
}

static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};
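
/*
 * Illustrative only (not part of this driver): a consumer reaches the ops
 * above through the generic mailbox client API. A minimal sketch, with
 * my_rx_callback being a hypothetical client callback:
 *
 *	struct mbox_client cl = {
 *		.dev = dev,
 *		.tx_block = true,
 *		.rx_callback = my_rx_callback,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	u32 word = 0x12345678;
 *
 *	mbox_send_message(chan, &word);	// ends up in imx_mu_send_data()
 *	mbox_free_channel(chan);	// ends up in imx_mu_shutdown()
 */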
static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
					       const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	switch (type) {
	case IMX_MU_TYPE_TX:
	case IMX_MU_TYPE_RX:
		if (idx != 0)
			dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
		chan = type;
		break;
	case IMX_MU_TYPE_RXDB:
		chan = 2 + idx;
		break;
	default:
		dev_err(mbox->dev, "Invalid chan type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
				      const struct of_phandle_args *sp)
{
	struct mbox_chan *p_chan;
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	/* RST only supports 1 channel */
	if ((type == IMX_MU_TYPE_RST) && idx) {
		dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	chan = type * 4 + idx;
	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	p_chan = &mbox->chans[chan];

	if (type == IMX_MU_TYPE_TXDB_V2)
		p_chan->txdone_method = TXDONE_BY_ACK;

	return p_chan;
}
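
/*
 * A generic (non-SCU/SECO) consumer selects a channel with a two-cell
 * specifier <type idx>, mapped above as chan = type * 4 + idx. An
 * illustrative device-tree fragment (node name made up):
 *
 *	mboxes = <&mu 0 1>, <&mu 1 1>;	// TX1 and RX1
 *	mbox-names = "tx", "rx";
 */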
static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
					   const struct of_phandle_args *sp)
{
	u32 type;

	if (sp->args_count < 1) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */

	/* Only supports TXDB and RXDB */
	if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
		dev_err(mbox->dev, "Invalid type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	return imx_mu_xlate(mbox, sp);
}

static void imx_mu_get_tr_rr(struct imx_mu_priv *priv)
{
	u32 val;

	if (priv->dcfg->type & IMX_MU_V2) {
		val = imx_mu_read(priv, IMX_MU_V2_PAR_OFF);
		priv->num_tr = FIELD_GET(IMX_MU_V2_TR_MASK, val);
		priv->num_rr = FIELD_GET(IMX_MU_V2_RR_MASK, val);
	} else {
		priv->num_tr = 4;
		priv->num_rr = 4;
	}
}

static int imx_mu_init_generic(struct imx_mu_priv *priv)
{
	unsigned int i;
	unsigned int val;

	if (priv->num_rr > 4 || priv->num_tr > 4) {
		WARN_ONCE(true, "%s does not support TR/RR larger than 4\n", __func__);
		return -EOPNOTSUPP;
	}

	for (i = 0; i < IMX_MU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i % 4;
		cp->type = i >> 2;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_CHANS;
	priv->mbox.of_xlate = imx_mu_xlate;

	if (priv->side_b)
		return 0;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);

	/* Clear any pending GIP */
	val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
	imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);

	/* Clear any pending RSR */
	for (i = 0; i < priv->num_rr; i++)
		imx_mu_read(priv, priv->dcfg->xRR + i * 4);

	return 0;
}
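
/*
 * The generic layout above is type-major: IMX_MU_CHANS (24) channels laid
 * out as 6 channel types x 4 indices, so con_priv[i] gets idx = i % 4 and
 * type = i >> 2, matching the chan = type * 4 + idx lookup in imx_mu_xlate().
 */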
static int imx_mu_init_specific(struct imx_mu_priv *priv)
{
	unsigned int i;
	int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;

	for (i = 0; i < num_chans; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i < 2 ? 0 : i - 2;
		cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "%s[%i-%i]", dev_name(priv->dev), cp->type, cp->idx);
	}

	priv->mbox.num_chans = num_chans;
	priv->mbox.of_xlate = imx_mu_specific_xlate;

	/* Set default MU configuration */
	for (i = 0; i < IMX_MU_xCR_MAX; i++)
		imx_mu_write(priv, 0, priv->dcfg->xCR[i]);

	return 0;
}

static int imx_mu_init_seco(struct imx_mu_priv *priv)
{
	int ret;

	ret = imx_mu_init_generic(priv);
	if (ret)
		return ret;

	priv->mbox.of_xlate = imx_mu_seco_xlate;

	return 0;
}

static int imx_mu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_mu_priv *priv;
	const struct imx_mu_dcfg *dcfg;
	int i, ret;
	u32 size;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	dcfg = of_device_get_match_data(dev);
	if (!dcfg)
		return -EINVAL;
	priv->dcfg = dcfg;

	if (priv->dcfg->type & IMX_MU_V2_IRQ) {
		priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
		if (priv->irq[IMX_MU_TYPE_TX] < 0)
			return priv->irq[IMX_MU_TYPE_TX];
		priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
		if (priv->irq[IMX_MU_TYPE_RX] < 0)
			return priv->irq[IMX_MU_TYPE_RX];
	} else {
		ret = platform_get_irq(pdev, 0);
		if (ret < 0)
			return ret;

		for (i = 0; i < IMX_MU_CHANS; i++)
			priv->irq[i] = ret;
	}

	if (priv->dcfg->type & IMX_MU_V2_S4)
		size = sizeof(struct imx_s4_rpc_msg_max);
	else
		size = sizeof(struct imx_sc_rpc_msg_max);

	priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!priv->msg)
		return -ENOMEM;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		if (PTR_ERR(priv->clk) != -ENOENT)
			return PTR_ERR(priv->clk);

		priv->clk = NULL;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	imx_mu_get_tr_rr(priv);

	priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");

	ret = priv->dcfg->init(priv);
	if (ret) {
		dev_err(dev, "Failed to init MU\n");
		goto disable_clk;
	}

	spin_lock_init(&priv->xcr_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &imx_mu_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = devm_mbox_controller_register(dev, &priv->mbox);
	if (ret)
		goto disable_clk;

	of_platform_populate(dev->of_node, NULL, NULL, dev);

	pm_runtime_enable(dev);

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	clk_disable_unprepare(priv->clk);

	return 0;

disable_runtime_pm:
	pm_runtime_disable(dev);
disable_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(priv->dev);
}

static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.xTR	= 0x20,
	.xRR	= 0x40,
	.xSR	= {0x60, 0x60, 0x60, 0x60},
	.xCR	= {0x64, 0x64, 0x64, 0x64, 0x64},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
	.tx	= imx_mu_generic_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_generic_rxdb,
	.init	= imx_mu_init_generic,
	.type	= IMX_MU_V2,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.type	= IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
	.xTR	= 0x200,
	.xRR	= 0x280,
	.xSR	= {0xC, 0x118, 0x124, 0x12C},
	.xCR	= {0x8, 0x110, 0x114, 0x120, 0x128},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
	.tx	= imx_mu_specific_tx,
	.rx	= imx_mu_specific_rx,
	.init	= imx_mu_init_specific,
	.rxdb	= imx_mu_generic_rxdb,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
	.tx	= imx_mu_seco_tx,
	.rx	= imx_mu_generic_rx,
	.rxdb	= imx_mu_seco_rxdb,
	.init	= imx_mu_init_seco,
	.xTR	= 0x0,
	.xRR	= 0x10,
	.xSR	= {0x20, 0x20, 0x20, 0x20},
	.xCR	= {0x24, 0x24, 0x24, 0x24, 0x24},
};
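
/*
 * Note how the V1 variants above repeat a single SR/CR offset for every
 * function (all channels share one register pair), while the V2 layouts
 * point each function at its own status/control register.
 */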
static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
	{ .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
	{ .compatible = "fsl,imx95-mu", .data = &imx_mu_cfg_imx8ulp },
	{ .compatible = "fsl,imx95-mu-ele", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx95-mu-v2x", .data = &imx_mu_cfg_imx8ulp_s4 },
	{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
	{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);

static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	if (!priv->clk) {
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
	}

	priv->suspend = true;

	return 0;
}

static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int i;

	/*
	 * ONLY restore the MU registers when context has been lost: TIE may
	 * legitimately be set during noirq resume while MU communication is
	 * in flight, and blindly restoring the saved values would clear it,
	 * making subsequent MU sends fail and possibly freezing the system.
	 * This issue was observed when testing freeze-mode suspend.
	 */
	if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
		for (i = 0; i < IMX_MU_xCR_MAX; i++)
			imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
	}

	priv->suspend = false;

	return 0;
}

static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}

static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static struct platform_driver imx_mu_driver = {
	.probe		= imx_mu_probe,
	.remove_new	= imx_mu_remove,
	.driver = {
		.name	= "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");