  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * ARM Message Handling Unit Version 2 (MHUv2) driver.
  4. *
  5. * Copyright (C) 2020 ARM Ltd.
  6. * Copyright (C) 2020 Linaro Ltd.
  7. *
 * An MHUv2 mailbox controller can provide up to 124 channel windows (each 32
 * bits wide), and the driver allows any combination of the two transport
 * protocol modes, data-transfer and doorbell, to be used on those channel
 * windows.
  12. *
  13. * The transport protocols should be specified in the device tree entry for the
  14. * device. The transport protocols determine how the underlying hardware
  15. * resources of the device are utilized when transmitting data. Refer to the
  16. * device tree bindings of the ARM MHUv2 controller for more details.
  17. *
 * The number of registered mailbox channels depends on both the underlying
 * hardware (mainly the number of channel windows implemented by the platform)
 * and the selected transport protocols.
  21. *
 * The MHUv2 controller can work as both a sender and a receiver, but the
 * driver and the DT bindings support unidirectional transfers for better
 * allocation of the channels. That is, this driver is probed for two separate
 * devices per mailbox controller: a sender device and a receiver device.
  26. */
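/*
 * A minimal, illustrative device tree fragment for a sender frame (node name,
 * addresses, clocks and interrupt specifier below are assumptions, not taken
 * from this driver; see the ARM MHUv2 device tree binding for the
 * authoritative schema). It describes one doorbell window followed by a
 * 4-window data-transfer channel:
 *
 *	mhu_tx: mailbox@2b1f0000 {
 *		compatible = "arm,mhuv2-tx", "arm,primecell";
 *		reg = <0x2b1f0000 0x1000>;
 *		clocks = <&refclk 0>;
 *		clock-names = "apb_pclk";
 *		interrupts = <0 36 4>;
 *		#mbox-cells = <2>;
 *		arm,mhuv2-protocols = <0 1>, <1 4>;
 *	};
 */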
  27. #include <linux/amba/bus.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/mailbox_controller.h>
  30. #include <linux/mailbox/arm_mhuv2_message.h>
  31. #include <linux/module.h>
  32. #include <linux/of_address.h>
  33. #include <linux/spinlock.h>
  34. /* ====== MHUv2 Registers ====== */
  35. /* Maximum number of channel windows */
  36. #define MHUV2_CH_WN_MAX 124
  37. /* Number of combined interrupt status registers */
  38. #define MHUV2_CMB_INT_ST_REG_CNT 4
  39. #define MHUV2_STAT_BYTES (sizeof(u32))
  40. #define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
  41. #define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1)
  42. #define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
  43. /* Register Message Handling Unit Configuration fields */
  44. struct mhu_cfg_t {
  45. u32 num_ch : 7;
  46. u32 pad : 25;
  47. } __packed;
/* Register Interrupt Status fields */
  49. struct int_st_t {
  50. u32 nr2r : 1;
  51. u32 r2nr : 1;
  52. u32 pad : 30;
  53. } __packed;
  54. /* Register Interrupt Clear fields */
  55. struct int_clr_t {
  56. u32 nr2r : 1;
  57. u32 r2nr : 1;
  58. u32 pad : 30;
  59. } __packed;
  60. /* Register Interrupt Enable fields */
  61. struct int_en_t {
  62. u32 r2nr : 1;
  63. u32 nr2r : 1;
  64. u32 chcomb : 1;
  65. u32 pad : 29;
  66. } __packed;
  67. /* Register Implementer Identification fields */
  68. struct iidr_t {
  69. u32 implementer : 12;
  70. u32 revision : 4;
  71. u32 variant : 4;
  72. u32 product_id : 12;
  73. } __packed;
  74. /* Register Architecture Identification Register fields */
  75. struct aidr_t {
  76. u32 arch_minor_rev : 4;
  77. u32 arch_major_rev : 4;
  78. u32 pad : 24;
  79. } __packed;
  80. /* Sender Channel Window fields */
  81. struct mhu2_send_ch_wn_reg {
  82. u32 stat;
  83. u8 pad1[0x0C - 0x04];
  84. u32 stat_set;
  85. u32 int_st;
  86. u32 int_clr;
  87. u32 int_en;
  88. u8 pad2[0x20 - 0x1C];
  89. } __packed;
  90. /* Sender frame register fields */
  91. struct mhu2_send_frame_reg {
  92. struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
  93. struct mhu_cfg_t mhu_cfg;
  94. u32 resp_cfg;
  95. u32 access_request;
  96. u32 access_ready;
  97. struct int_st_t int_st;
  98. struct int_clr_t int_clr;
  99. struct int_en_t int_en;
  100. u32 reserved0;
  101. u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
  102. u8 pad[0xFC8 - 0xFB0];
  103. struct iidr_t iidr;
  104. struct aidr_t aidr;
  105. } __packed;
  106. /* Receiver Channel Window fields */
  107. struct mhu2_recv_ch_wn_reg {
  108. u32 stat;
  109. u32 stat_masked;
  110. u32 stat_clear;
  111. u8 reserved0[0x10 - 0x0C];
  112. u32 mask;
  113. u32 mask_set;
  114. u32 mask_clear;
  115. u8 pad[0x20 - 0x1C];
  116. } __packed;
  117. /* Receiver frame register fields */
  118. struct mhu2_recv_frame_reg {
  119. struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
  120. struct mhu_cfg_t mhu_cfg;
  121. u8 reserved0[0xF90 - 0xF84];
  122. struct int_st_t int_st;
  123. struct int_clr_t int_clr;
  124. struct int_en_t int_en;
  125. u32 pad;
  126. u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
  127. u8 reserved2[0xFC8 - 0xFB0];
  128. struct iidr_t iidr;
  129. struct aidr_t aidr;
  130. } __packed;
  131. /* ====== MHUv2 data structures ====== */
  132. enum mhuv2_transport_protocol {
  133. DOORBELL = 0,
  134. DATA_TRANSFER = 1
  135. };
  136. enum mhuv2_frame {
  137. RECEIVER_FRAME,
  138. SENDER_FRAME
  139. };
  140. /**
  141. * struct mhuv2 - MHUv2 mailbox controller data
  142. *
  143. * @mbox: Mailbox controller belonging to the MHU frame.
 * @send: Base address of the sender frame register map.
 * @recv: Base address of the receiver frame register map.
 * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME.
 * @irq: Interrupt line of the frame.
 * @windows: Number of channel windows implemented by the platform.
 * @minor: Minor architecture revision of the controller.
 * @length: Number of elements in the @protocols array.
 * @protocols: Raw protocol information, as read from the device tree.
 * @doorbell_pending_lock: Spinlock required for correct operation of the Tx
 * interrupt for doorbells.
  154. */
  155. struct mhuv2 {
  156. struct mbox_controller mbox;
  157. union {
  158. struct mhu2_send_frame_reg __iomem *send;
  159. struct mhu2_recv_frame_reg __iomem *recv;
  160. };
  161. enum mhuv2_frame frame;
  162. unsigned int irq;
  163. unsigned int windows;
  164. unsigned int minor;
  165. unsigned int length;
  166. u32 *protocols;
  167. spinlock_t doorbell_pending_lock;
  168. };
  169. #define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
  170. /**
  171. * struct mhuv2_protocol_ops - MHUv2 operations
  172. *
  173. * Each transport protocol must provide an implementation of the operations
  174. * provided here.
  175. *
  176. * @rx_startup: Startup callback for receiver.
  177. * @rx_shutdown: Shutdown callback for receiver.
  178. * @read_data: Reads and clears newly available data.
 * @tx_startup: Startup callback for sender.
 * @tx_shutdown: Shutdown callback for sender.
 * @last_tx_done: Report whether the last Tx has completed.
  182. * @send_data: Send data to the receiver.
  183. */
  184. struct mhuv2_protocol_ops {
  185. int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
  186. void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
  187. void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
  188. void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
  189. void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
  190. int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
  191. int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
  192. };
  193. /*
  194. * MHUv2 mailbox channel's private information
  195. *
  196. * @ops: protocol specific ops for the channel.
  197. * @ch_wn_idx: Channel window index allocated to the channel.
  198. * @windows: Total number of windows consumed by the channel, only relevant
  199. * in DATA_TRANSFER protocol.
  200. * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant
  201. * in DOORBELL protocol.
  202. * @pending: Flag indicating pending doorbell interrupt, only relevant in
  203. * DOORBELL protocol.
  204. */
  205. struct mhuv2_mbox_chan_priv {
  206. const struct mhuv2_protocol_ops *ops;
  207. u32 ch_wn_idx;
  208. union {
  209. u32 windows;
  210. struct {
  211. u32 doorbell;
  212. u32 pending;
  213. };
  214. };
  215. };
  216. /* Macro for reading a bitfield within a physically mapped packed struct */
  217. #define readl_relaxed_bitfield(_regptr, _type, _field) \
  218. ({ \
  219. u32 _regval; \
  220. _regval = readl_relaxed((_regptr)); \
  221. (*(_type *)(&_regval))._field; \
  222. })
  223. /* Macro for writing a bitfield within a physically mapped packed struct */
  224. #define writel_relaxed_bitfield(_value, _regptr, _type, _field) \
  225. ({ \
  226. u32 _regval; \
  227. _regval = readl_relaxed(_regptr); \
  228. (*(_type *)(&_regval))._field = _value; \
  229. writel_relaxed(_regval, _regptr); \
  230. })
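/*
 * For example, as done in mhuv2_tx_init() below, the number of implemented
 * channel windows can be read from the sender frame's configuration register
 * with:
 *
 *	windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg,
 *					 struct mhu_cfg_t, num_ch);
 *
 * and the channel-combined interrupt enable bit can be set with:
 *
 *	writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
 */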
  231. /* =================== Doorbell transport protocol operations =============== */
  232. static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
  233. {
  234. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  235. writel_relaxed(BIT(priv->doorbell),
  236. &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
  237. return 0;
  238. }
  239. static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
  240. struct mbox_chan *chan)
  241. {
  242. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  243. writel_relaxed(BIT(priv->doorbell),
  244. &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
  245. }
  246. static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
  247. {
  248. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  249. writel_relaxed(BIT(priv->doorbell),
  250. &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
  251. return NULL;
  252. }
  253. static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
  254. struct mbox_chan *chan)
  255. {
  256. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  257. return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
  258. BIT(priv->doorbell));
  259. }
  260. static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
  261. void *arg)
  262. {
  263. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  264. unsigned long flags;
  265. spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
  266. priv->pending = 1;
  267. writel_relaxed(BIT(priv->doorbell),
  268. &mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
  269. spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
  270. return 0;
  271. }
  272. static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
  273. .rx_startup = mhuv2_doorbell_rx_startup,
  274. .rx_shutdown = mhuv2_doorbell_rx_shutdown,
  275. .read_data = mhuv2_doorbell_read_data,
  276. .last_tx_done = mhuv2_doorbell_last_tx_done,
  277. .send_data = mhuv2_doorbell_send_data,
  278. };
  279. #define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops)
  280. /* ============= Data transfer transport protocol operations ================ */
  281. static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
  282. struct mbox_chan *chan)
  283. {
  284. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  285. int i = priv->ch_wn_idx + priv->windows - 1;
  286. /*
  287. * The protocol mandates that all but the last status register must be
  288. * masked.
  289. */
  290. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
  291. return 0;
  292. }
  293. static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
  294. struct mbox_chan *chan)
  295. {
  296. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  297. int i = priv->ch_wn_idx + priv->windows - 1;
  298. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
  299. }
  300. static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
  301. struct mbox_chan *chan)
  302. {
  303. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  304. const int windows = priv->windows;
  305. struct arm_mhuv2_mbox_msg *msg;
  306. u32 *data;
  307. int i, idx;
  308. msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
  309. if (!msg)
  310. return ERR_PTR(-ENOMEM);
  311. data = msg->data = msg + 1;
  312. msg->len = windows * MHUV2_STAT_BYTES;
  313. /*
  314. * Messages are expected in order of most significant word to least
	 * significant word. Refer to mhuv2_data_transfer_send_data() for more
  316. * details.
  317. *
  318. * We also need to read the stat register instead of stat_masked, as we
  319. * masked all but the last window.
  320. *
  321. * Last channel window must be cleared as the final operation. Upon
  322. * clearing the last channel window register, which is unmasked in
  323. * data-transfer protocol, the interrupt is de-asserted.
  324. */
  325. for (i = 0; i < windows; i++) {
  326. idx = priv->ch_wn_idx + i;
  327. data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
  328. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
  329. }
  330. return msg;
  331. }
  332. static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
  333. struct mbox_chan *chan)
  334. {
  335. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  336. int i = priv->ch_wn_idx + priv->windows - 1;
  337. /* Enable interrupts only for the last window */
  338. if (mhu->minor) {
  339. writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
  340. writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
  341. }
  342. }
  343. static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
  344. struct mbox_chan *chan)
  345. {
  346. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  347. int i = priv->ch_wn_idx + priv->windows - 1;
  348. if (mhu->minor)
  349. writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
  350. }
  351. static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
  352. struct mbox_chan *chan)
  353. {
  354. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  355. int i = priv->ch_wn_idx + priv->windows - 1;
  356. /* Just checking the last channel window should be enough */
  357. return !readl_relaxed(&mhu->send->ch_wn[i].stat);
  358. }
  359. /*
  360. * Message will be transmitted from most significant to least significant word.
  361. * This is to allow for messages shorter than channel windows to still trigger
  362. * the receiver interrupt which gets activated when the last stat register is
  363. * written. As an example, a 6-word message is to be written on a 4-channel MHU
 * connection. All but the last channel window (stat 3 here) are masked and
 * will not generate an interrupt on the receiver side when written.
  366. *
  367. * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004],
  368. * [0x00000005], [0x00000006]
  369. *
  370. * ROUND 1:
  371. * stat reg To write Write sequence
  372. * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver
  373. * [ stat 2 ] <- [0x00000002] 3
  374. * [ stat 1 ] <- [0x00000003] 2
  375. * [ stat 0 ] <- [0x00000004] 1
  376. *
  377. * data += 4 // Increment data pointer by number of stat regs
  378. *
  379. * ROUND 2:
  380. * stat reg To write Write sequence
  381. * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver
  382. * [ stat 2 ] <- [0x00000006] 1
  383. * [ stat 1 ] <- [0x00000000]
  384. * [ stat 0 ] <- [0x00000000]
  385. */
  386. static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
  387. struct mbox_chan *chan, void *arg)
  388. {
  389. const struct arm_mhuv2_mbox_msg *msg = arg;
  390. int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
  391. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  392. int windows = priv->windows;
  393. u32 *data = msg->data, word;
  394. while (bytes_left) {
  395. if (!data[0]) {
  396. dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver");
  397. return -EINVAL;
  398. }
		while (!mhuv2_data_transfer_last_tx_done(mhu, chan))
			continue;
  401. bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));
  402. for (i = windows - 1; i >= 0; i--) {
			/* Skip windows with no data left to send in this round */
  404. if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
  405. continue;
  406. word = data[i];
  407. bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
  408. if (unlikely(bytes_to_send))
  409. word &= LSB_MASK(bytes_to_send);
  410. else
  411. bytes_to_send = MHUV2_STAT_BYTES;
  412. writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
  413. bytes_left -= bytes_to_send;
  414. bytes_in_round -= bytes_to_send;
  415. }
  416. data += windows;
  417. }
  418. return 0;
  419. }
  420. static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
  421. .rx_startup = mhuv2_data_transfer_rx_startup,
  422. .rx_shutdown = mhuv2_data_transfer_rx_shutdown,
  423. .read_data = mhuv2_data_transfer_read_data,
  424. .tx_startup = mhuv2_data_transfer_tx_startup,
  425. .tx_shutdown = mhuv2_data_transfer_tx_shutdown,
  426. .last_tx_done = mhuv2_data_transfer_last_tx_done,
  427. .send_data = mhuv2_data_transfer_send_data,
  428. };
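/*
 * Illustrative sketch of a data-transfer consumer (the client device, channel
 * index and message payload are assumptions, not part of this driver): the
 * client fills a struct arm_mhuv2_mbox_msg and hands it to the mailbox core,
 * which ends up in mhuv2_data_transfer_send_data() above.
 *
 *	struct mbox_client cl = {
 *		.dev = dev,
 *		.tx_block = true,
 *		.tx_tout = 500,
 *	};
 *	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
 *	u32 payload[4] = { 0xdeadbeef, 0x1, 0x2, 0x3 };
 *	struct arm_mhuv2_mbox_msg msg = {
 *		.data = payload,
 *		.len = sizeof(payload),
 *	};
 *
 *	if (!IS_ERR(chan))
 *		mbox_send_message(chan, &msg);
 */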
  429. /* Interrupt handlers */
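/*
 * Map the first set bit in the combined interrupt status registers back to the
 * mbox channel it belongs to, by walking the protocol/window pairs in the same
 * order they were used to allocate the channels.
 */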
  430. static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
  431. {
  432. struct mbox_chan *chans = mhu->mbox.chans;
  433. int channel = 0, i, j, offset = 0, windows, protocol, ch_wn;
  434. u32 stat;
  435. for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
  436. stat = readl_relaxed(reg + i);
  437. if (!stat)
  438. continue;
  439. ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
  440. for (j = 0; j < mhu->length; j += 2) {
  441. protocol = mhu->protocols[j];
  442. windows = mhu->protocols[j + 1];
  443. if (ch_wn >= offset + windows) {
  444. if (protocol == DOORBELL)
  445. channel += MHUV2_STAT_BITS * windows;
  446. else
  447. channel++;
  448. offset += windows;
  449. continue;
  450. }
  451. /* Return first chan of the window in doorbell mode */
  452. if (protocol == DOORBELL)
  453. channel += MHUV2_STAT_BITS * (ch_wn - offset);
  454. return &chans[channel];
  455. }
  456. }
  457. return ERR_PTR(-EIO);
  458. }
  459. static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
  460. {
  461. struct mhuv2 *mhu = data;
  462. struct device *dev = mhu->mbox.dev;
  463. struct mhuv2_mbox_chan_priv *priv;
  464. struct mbox_chan *chan;
  465. unsigned long flags;
  466. int i, found = 0;
  467. u32 stat;
  468. chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
  469. if (IS_ERR(chan)) {
  470. dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
  471. return IRQ_NONE;
  472. }
  473. priv = chan->con_priv;
  474. if (!IS_PROTOCOL_DOORBELL(priv)) {
  475. for (i = 0; i < priv->windows; i++)
  476. writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + i].int_clr);
  477. if (chan->cl) {
  478. mbox_chan_txdone(chan, 0);
  479. return IRQ_HANDLED;
  480. }
		dev_warn(dev, "Tx interrupt received on channel (%u) not currently attached to a mailbox client\n",
			 priv->ch_wn_idx);
  483. return IRQ_NONE;
  484. }
  485. /* Clear the interrupt first, so we don't miss any doorbell later */
  486. writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
	/*
	 * In doorbell mode, make sure no new transmissions start while the
	 * interrupt handler is looking for completed doorbell Tx operations;
	 * otherwise we may consider some transfers complete before they
	 * actually are.
	 */
  493. spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
  494. /*
  495. * In case of doorbell mode, the first channel of the window is returned
  496. * by get_irq_chan_comb(). Find all the pending channels here.
  497. */
  498. stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
  499. for (i = 0; i < MHUV2_STAT_BITS; i++) {
  500. priv = chan[i].con_priv;
  501. /* Find cases where pending was 1, but stat's bit is cleared */
  502. if (priv->pending ^ ((stat >> i) & 0x1)) {
  503. BUG_ON(!priv->pending);
  504. if (!chan->cl) {
  505. dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
  506. priv->ch_wn_idx, i);
  507. continue;
  508. }
  509. mbox_chan_txdone(&chan[i], 0);
  510. priv->pending = 0;
  511. found++;
  512. }
  513. }
  514. spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
  515. if (!found) {
  516. /*
  517. * We may have already processed the doorbell in the previous
  518. * iteration if the interrupt came right after we cleared it but
  519. * before we read the stat register.
  520. */
		dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt\n",
			priv->ch_wn_idx);
  523. return IRQ_NONE;
  524. }
  525. return IRQ_HANDLED;
  526. }
  527. static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
  528. {
  529. struct mhuv2_mbox_chan_priv *priv;
  530. struct mbox_chan *chan;
  531. u32 stat;
  532. chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
  533. if (IS_ERR(chan))
  534. return chan;
  535. priv = chan->con_priv;
  536. if (!IS_PROTOCOL_DOORBELL(priv))
  537. return chan;
  538. /*
  539. * In case of doorbell mode, the first channel of the window is returned
  540. * by the routine. Find the exact channel here.
  541. */
  542. stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
  543. BUG_ON(!stat);
  544. return chan + __builtin_ctz(stat);
  545. }
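/*
 * MHUv2.0 doesn't provide the channel-combined interrupt status registers, so
 * scan each channel's masked status register to find the interrupting channel
 * (and, for doorbell windows, the exact doorbell bit).
 */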
  546. static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
  547. {
  548. struct mbox_chan *chans = mhu->mbox.chans;
  549. struct mhuv2_mbox_chan_priv *priv;
  550. u32 stat;
  551. int i = 0;
  552. while (i < mhu->mbox.num_chans) {
  553. priv = chans[i].con_priv;
  554. stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
  555. if (stat) {
  556. if (IS_PROTOCOL_DOORBELL(priv))
  557. i += __builtin_ctz(stat);
  558. return &chans[i];
  559. }
  560. i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
  561. }
  562. return ERR_PTR(-EIO);
  563. }
  564. static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
  565. {
  566. if (!mhu->minor)
  567. return get_irq_chan_stat_rx(mhu);
  568. return get_irq_chan_comb_rx(mhu);
  569. }
  570. static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
  571. {
  572. struct mhuv2 *mhu = arg;
  573. struct mbox_chan *chan = get_irq_chan_rx(mhu);
  574. struct device *dev = mhu->mbox.dev;
  575. struct mhuv2_mbox_chan_priv *priv;
  576. int ret = IRQ_NONE;
  577. void *data;
  578. if (IS_ERR(chan)) {
  579. dev_warn(dev, "Failed to find channel for the rx interrupt\n");
  580. return IRQ_NONE;
  581. }
  582. priv = chan->con_priv;
  583. /* Read and clear the data first */
  584. data = priv->ops->read_data(mhu, chan);
  585. if (!chan->cl) {
  586. dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
  587. priv->ch_wn_idx);
  588. } else if (IS_ERR(data)) {
  589. dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
  590. } else {
  591. mbox_chan_received_data(chan, data);
  592. ret = IRQ_HANDLED;
  593. }
  594. if (!IS_ERR(data))
  595. kfree(data);
  596. return ret;
  597. }
  598. /* Sender and receiver ops */
  599. static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
  600. {
  601. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  602. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  603. return priv->ops->last_tx_done(mhu, chan);
  604. }
  605. static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
  606. {
  607. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  608. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  609. if (!priv->ops->last_tx_done(mhu, chan))
  610. return -EBUSY;
  611. return priv->ops->send_data(mhu, chan, data);
  612. }
  613. static int mhuv2_sender_startup(struct mbox_chan *chan)
  614. {
  615. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  616. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  617. if (priv->ops->tx_startup)
  618. priv->ops->tx_startup(mhu, chan);
  619. return 0;
  620. }
  621. static void mhuv2_sender_shutdown(struct mbox_chan *chan)
  622. {
  623. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  624. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  625. if (priv->ops->tx_shutdown)
  626. priv->ops->tx_shutdown(mhu, chan);
  627. }
  628. static const struct mbox_chan_ops mhuv2_sender_ops = {
  629. .send_data = mhuv2_sender_send_data,
  630. .startup = mhuv2_sender_startup,
  631. .shutdown = mhuv2_sender_shutdown,
  632. .last_tx_done = mhuv2_sender_last_tx_done,
  633. };
  634. static int mhuv2_receiver_startup(struct mbox_chan *chan)
  635. {
  636. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  637. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  638. return priv->ops->rx_startup(mhu, chan);
  639. }
  640. static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
  641. {
  642. struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
  643. struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
  644. priv->ops->rx_shutdown(mhu, chan);
  645. }
  646. static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
  647. {
  648. dev_err(chan->mbox->dev,
  649. "Trying to transmit on a receiver MHU frame\n");
  650. return -EIO;
  651. }
  652. static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
  653. {
  654. dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
  655. return true;
  656. }
  657. static const struct mbox_chan_ops mhuv2_receiver_ops = {
  658. .send_data = mhuv2_receiver_send_data,
  659. .startup = mhuv2_receiver_startup,
  660. .shutdown = mhuv2_receiver_shutdown,
  661. .last_tx_done = mhuv2_receiver_last_tx_done,
  662. };
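/*
 * Translate a two-cell mbox specifier into a channel. The first cell walks the
 * entries of "arm,mhuv2-protocols" (per window for doorbell entries, per
 * channel for data-transfer entries) and the second cell selects the doorbell
 * bit within the window; it must be 0 for data-transfer channels.
 */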
  663. static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
  664. const struct of_phandle_args *pa)
  665. {
  666. struct mhuv2 *mhu = mhu_from_mbox(mbox);
  667. struct mbox_chan *chans = mbox->chans;
  668. int channel = 0, i, offset, doorbell, protocol, windows;
  669. if (pa->args_count != 2)
  670. return ERR_PTR(-EINVAL);
  671. offset = pa->args[0];
  672. doorbell = pa->args[1];
  673. if (doorbell >= MHUV2_STAT_BITS)
  674. goto out;
  675. for (i = 0; i < mhu->length; i += 2) {
  676. protocol = mhu->protocols[i];
  677. windows = mhu->protocols[i + 1];
  678. if (protocol == DOORBELL) {
  679. if (offset < windows)
  680. return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
  681. channel += MHUV2_STAT_BITS * windows;
  682. offset -= windows;
  683. } else {
  684. if (offset == 0) {
  685. if (doorbell)
  686. goto out;
  687. return &chans[channel];
  688. }
  689. channel++;
  690. offset--;
  691. }
  692. }
  693. out:
  694. dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
  695. pa->args[0], doorbell);
  696. return ERR_PTR(-ENODEV);
  697. }
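/*
 * Validate the protocol/window pairs read from the device tree and compute the
 * number of mailbox channels to register: a doorbell window contributes 32
 * channels (one per bit), while a data-transfer entry contributes a single
 * channel regardless of how many windows it spans. For example (illustrative),
 * <DOORBELL 2>, <DATA_TRANSFER 4> would register 2 * 32 + 1 = 65 channels over
 * 6 channel windows.
 */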
  698. static int mhuv2_verify_protocol(struct mhuv2 *mhu)
  699. {
  700. struct device *dev = mhu->mbox.dev;
  701. int protocol, windows, channels = 0, total_windows = 0, i;
  702. for (i = 0; i < mhu->length; i += 2) {
  703. protocol = mhu->protocols[i];
  704. windows = mhu->protocols[i + 1];
  705. if (!windows) {
			dev_err(dev, "Window count can't be zero (%d)\n", i);
  707. return -EINVAL;
  708. }
  709. total_windows += windows;
  710. if (protocol == DOORBELL) {
  711. channels += MHUV2_STAT_BITS * windows;
  712. } else if (protocol == DATA_TRANSFER) {
  713. channels++;
  714. } else {
  715. dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
  716. protocol, MHUV2_PROTOCOL_PROP, i);
  717. return -EINVAL;
  718. }
  719. }
  720. if (total_windows > mhu->windows) {
		dev_err(dev, "Channel windows can't be more than what's implemented by the hardware (%d: %d)\n",
			total_windows, mhu->windows);
  723. return -EINVAL;
  724. }
  725. mhu->mbox.num_chans = channels;
  726. return 0;
  727. }
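/*
 * Allocate the mbox_chan array and per-channel private data, assigning
 * consecutive channel windows in device tree order: one channel per doorbell
 * bit for doorbell entries, and one channel covering all of its windows for
 * each data-transfer entry.
 */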
  728. static int mhuv2_allocate_channels(struct mhuv2 *mhu)
  729. {
  730. struct mbox_controller *mbox = &mhu->mbox;
  731. struct mhuv2_mbox_chan_priv *priv;
  732. struct device *dev = mbox->dev;
  733. struct mbox_chan *chans;
  734. int protocol, windows = 0, next_window = 0, i, j, k;
  735. chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
  736. if (!chans)
  737. return -ENOMEM;
  738. mbox->chans = chans;
  739. for (i = 0; i < mhu->length; i += 2) {
  740. next_window += windows;
  741. protocol = mhu->protocols[i];
  742. windows = mhu->protocols[i + 1];
  743. if (protocol == DATA_TRANSFER) {
  744. priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
  745. if (!priv)
  746. return -ENOMEM;
  747. priv->ch_wn_idx = next_window;
  748. priv->ops = &mhuv2_data_transfer_ops;
  749. priv->windows = windows;
  750. chans++->con_priv = priv;
  751. continue;
  752. }
  753. for (j = 0; j < windows; j++) {
  754. for (k = 0; k < MHUV2_STAT_BITS; k++) {
  755. priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
  756. if (!priv)
  757. return -ENOMEM;
  758. priv->ch_wn_idx = next_window + j;
  759. priv->ops = &mhuv2_doorbell_ops;
  760. priv->doorbell = k;
  761. chans++->con_priv = priv;
  762. }
  763. /*
  764. * Permanently enable interrupt as we can't
  765. * control it per doorbell.
  766. */
  767. if (mhu->frame == SENDER_FRAME && mhu->minor)
  768. writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
  769. }
  770. }
  771. /* Make sure we have initialized all channels */
  772. BUG_ON(chans - mbox->chans != mbox->num_chans);
  773. return 0;
  774. }
  775. static int mhuv2_parse_channels(struct mhuv2 *mhu)
  776. {
  777. struct device *dev = mhu->mbox.dev;
  778. const struct device_node *np = dev->of_node;
  779. int ret, count;
  780. u32 *protocols;
  781. count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
  782. if (count <= 0 || count % 2) {
  783. dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
  784. count);
  785. return -EINVAL;
  786. }
  787. protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
  788. if (!protocols)
  789. return -ENOMEM;
  790. ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
  791. if (ret) {
  792. dev_err(dev, "Failed to read %s property: %d\n",
  793. MHUV2_PROTOCOL_PROP, ret);
  794. return ret;
  795. }
  796. mhu->protocols = protocols;
  797. mhu->length = count;
  798. ret = mhuv2_verify_protocol(mhu);
  799. if (ret)
  800. return ret;
  801. return mhuv2_allocate_channels(mhu);
  802. }
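/*
 * Sender frame setup: read the hardware configuration, prefer the Tx-done
 * interrupt (available from arch minor version 1 onwards) and fall back to
 * polling otherwise, then raise access_request and wait for the receiver to
 * become ready.
 */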
  803. static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
  804. void __iomem *reg)
  805. {
  806. struct device *dev = mhu->mbox.dev;
  807. int ret, i;
  808. mhu->frame = SENDER_FRAME;
  809. mhu->mbox.ops = &mhuv2_sender_ops;
  810. mhu->send = reg;
  811. mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, struct mhu_cfg_t, num_ch);
  812. mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, struct aidr_t, arch_minor_rev);
  813. spin_lock_init(&mhu->doorbell_pending_lock);
	/*
	 * From arch minor version 1 onwards, a Tx "transfer complete"
	 * interrupt is provided by the controller.
	 */
  818. if (mhu->minor && adev->irq[0]) {
  819. ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
  820. mhuv2_sender_interrupt,
  821. IRQF_ONESHOT, "mhuv2-tx", mhu);
  822. if (ret) {
			dev_err(dev, "Failed to request tx IRQ, falling back to polling mode: %d\n",
				ret);
  825. } else {
  826. mhu->mbox.txdone_irq = true;
  827. mhu->mbox.txdone_poll = false;
  828. mhu->irq = adev->irq[0];
  829. writel_relaxed_bitfield(1, &mhu->send->int_en, struct int_en_t, chcomb);
  830. /* Disable all channel interrupts */
  831. for (i = 0; i < mhu->windows; i++)
  832. writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
  833. goto out;
  834. }
  835. }
  836. mhu->mbox.txdone_irq = false;
  837. mhu->mbox.txdone_poll = true;
  838. mhu->mbox.txpoll_period = 1;
  839. out:
  840. /* Wait for receiver to be ready */
  841. writel_relaxed(0x1, &mhu->send->access_request);
  842. while (!readl_relaxed(&mhu->send->access_ready))
  843. continue;
  844. return 0;
  845. }
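/*
 * Receiver frame setup: read the hardware configuration, request the Rx
 * interrupt, mask all channel windows (individual channels unmask theirs at
 * startup) and, from arch minor version 1 onwards, enable the channel-combined
 * interrupt.
 */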
  846. static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
  847. void __iomem *reg)
  848. {
  849. struct device *dev = mhu->mbox.dev;
  850. int ret, i;
  851. mhu->frame = RECEIVER_FRAME;
  852. mhu->mbox.ops = &mhuv2_receiver_ops;
  853. mhu->recv = reg;
  854. mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, struct mhu_cfg_t, num_ch);
  855. mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, struct aidr_t, arch_minor_rev);
  856. mhu->irq = adev->irq[0];
  857. if (!mhu->irq) {
  858. dev_err(dev, "Missing receiver IRQ\n");
  859. return -EINVAL;
  860. }
  861. ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
  862. mhuv2_receiver_interrupt, IRQF_ONESHOT,
  863. "mhuv2-rx", mhu);
  864. if (ret) {
  865. dev_err(dev, "Failed to request rx IRQ\n");
  866. return ret;
  867. }
  868. /* Mask all the channel windows */
  869. for (i = 0; i < mhu->windows; i++)
  870. writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
  871. if (mhu->minor)
  872. writel_relaxed_bitfield(1, &mhu->recv->int_en, struct int_en_t, chcomb);
  873. return 0;
  874. }
  875. static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
  876. {
  877. struct device *dev = &adev->dev;
  878. const struct device_node *np = dev->of_node;
  879. struct mhuv2 *mhu;
  880. void __iomem *reg;
  881. int ret = -EINVAL;
  882. reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
  883. if (IS_ERR(reg))
  884. return PTR_ERR(reg);
  885. mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
  886. if (!mhu)
  887. return -ENOMEM;
  888. mhu->mbox.dev = dev;
  889. mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
  890. if (of_device_is_compatible(np, "arm,mhuv2-tx"))
  891. ret = mhuv2_tx_init(adev, mhu, reg);
  892. else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
  893. ret = mhuv2_rx_init(adev, mhu, reg);
  894. else
  895. dev_err(dev, "Invalid compatible property\n");
  896. if (ret)
  897. return ret;
  898. /* Channel windows can't be 0 */
  899. BUG_ON(!mhu->windows);
  900. ret = mhuv2_parse_channels(mhu);
  901. if (ret)
  902. return ret;
  903. amba_set_drvdata(adev, mhu);
  904. ret = devm_mbox_controller_register(dev, &mhu->mbox);
  905. if (ret)
  906. dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
  907. return ret;
  908. }
  909. static void mhuv2_remove(struct amba_device *adev)
  910. {
  911. struct mhuv2 *mhu = amba_get_drvdata(adev);
  912. if (mhu->frame == SENDER_FRAME)
  913. writel_relaxed(0x0, &mhu->send->access_request);
  914. }
  915. static struct amba_id mhuv2_ids[] = {
  916. {
  917. /* 2.0 */
  918. .id = 0xbb0d1,
  919. .mask = 0xfffff,
  920. },
  921. {
  922. /* 2.1 */
  923. .id = 0xbb076,
  924. .mask = 0xfffff,
  925. },
  926. { 0, 0 },
  927. };
  928. MODULE_DEVICE_TABLE(amba, mhuv2_ids);
  929. static struct amba_driver mhuv2_driver = {
  930. .drv = {
  931. .name = "arm-mhuv2",
  932. },
  933. .id_table = mhuv2_ids,
  934. .probe = mhuv2_probe,
  935. .remove = mhuv2_remove,
  936. };
  937. module_amba_driver(mhuv2_driver);
  938. MODULE_LICENSE("GPL v2");
  939. MODULE_DESCRIPTION("ARM MHUv2 Driver");
  940. MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
  941. MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>");