fsl_rmu.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119
  1. /*
  2. * Freescale MPC85xx/MPC86xx RapidIO RMU support
  3. *
  4. * Copyright 2009 Sysgo AG
  5. * Thomas Moll <thomas.moll@sysgo.com>
  6. * - fixed maintenance access routines, check for aligned access
  7. *
  8. * Copyright 2009 Integrated Device Technology, Inc.
  9. * Alex Bounine <alexandre.bounine@idt.com>
  10. * - Added Port-Write message handling
  11. * - Added Machine Check exception handling
  12. *
  13. * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
  14. * Zhang Wei <wei.zhang@freescale.com>
  15. * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
  16. * Liu Gang <Gang.Liu@freescale.com>
  17. *
  18. * Copyright 2005 MontaVista Software, Inc.
  19. * Matt Porter <mporter@kernel.crashing.org>
  20. *
  21. * This program is free software; you can redistribute it and/or modify it
  22. * under the terms of the GNU General Public License as published by the
  23. * Free Software Foundation; either version 2 of the License, or (at your
  24. * option) any later version.
  25. */
  26. #include <linux/types.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/of_irq.h>
  30. #include <linux/of_platform.h>
  31. #include <linux/slab.h>
  32. #include "fsl_rio.h"
/* Fetch the RMU (message unit) state hung off a master port's private data */
#define GET_RMM_HANDLE(mport) \
		(((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO definition irq, which read from OF-tree */
#define IRQ_RIO_PW(m)		(((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m)		(((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

/* Message ring limits (entries); open paths require a power-of-two size */
#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

/* Inbound port-write mode register (IPWMR) bits */
#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

/* Inbound port-write status register (IPWSR) bits */
#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

/* Error/port-write interrupt status register offset within rio_regs_win */
#define RIO_EPWISR		0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1	0x80000000
#define RIO_EPWISR_PINT2	0x40000000
#define RIO_EPWISR_MU		0x00000002
#define RIO_EPWISR_PW		0x00000001

/* Values written to the status registers by msg_unit_error_handler()
 * to clear latched error conditions.
 */
#define IPWSR_CLEAR		0x98
#define OMSR_CLEAR		0x1cb3
#define IMSR_CLEAR		0x491
#define IDSR_CLEAR		0x91
#define ODSR_CLEAR		0x1c00
#define LTLEECSR_ENABLE_ALL	0xFFC000FC
#define RIO_LTLEECSR		0x060c

/* Inbound/outbound message unit status register offsets in rmu_regs_win */
#define RIO_IM0SR		0x64
#define RIO_IM1SR		0x164
#define RIO_OM0SR		0x4
#define RIO_OM1SR		0x104

#define RIO_DBELL_WIN_SIZE	0x1000

/* Outbound message mode (OMR) / status (OSR) register bits */
#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

/* Inbound message mode (IMR) / status (ISR) register bits */
#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

/* One outbound descriptor is 32 bytes; payload buffers are one page */
#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096

/* Doorbell mode (DMR) / status (DSR) register bits and message size */
#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001
#define DOORBELL_MESSAGE_SIZE	0x08

/* Serializes the outbound doorbell register sequence in
 * fsl_rio_doorbell_send() across callers.
 */
static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
/* Message unit register block; accessed big-endian via in_be32/out_be32 */
struct rio_msg_regs {
	u32 omr;	/* outbound message mode */
	u32 osr;	/* outbound message status */
	u32 pad1;
	u32 odqdpar;	/* outbound descriptor queue dequeue pointer */
	u32 pad2;
	u32 osar;	/* outbound source attributes (snoop enable) */
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;	/* outbound descriptor queue enqueue pointer */
	u32 pad4[13];
	u32 imr;	/* inbound message mode */
	u32 isr;	/* inbound message status */
	u32 pad5;
	u32 ifqdpar;	/* inbound frame queue dequeue pointer */
	u32 pad6;
	u32 ifqepar;	/* inbound frame queue enqueue pointer */
};

/* Doorbell unit register block: outbound half first, then inbound half */
struct rio_dbell_regs {
	u32 odmr;	/* outbound doorbell mode */
	u32 odsr;	/* outbound doorbell status */
	u32 pad1[4];
	u32 oddpr;	/* outbound doorbell destination (destid << 16) */
	u32 oddatr;	/* outbound doorbell data ((index << 20) | info) */
	u32 pad2[3];
	u32 odretcr;	/* outbound doorbell retry control -- see send path */
	u32 pad3[12];
	u32 dmr;	/* inbound doorbell mode */
	u32 dsr;	/* inbound doorbell status */
	u32 pad4;
	u32 dqdpar;	/* inbound doorbell queue dequeue pointer */
	u32 pad5;
	u32 dqepar;	/* inbound doorbell queue enqueue pointer */
};

/* Port-write unit register block */
struct rio_pw_regs {
	u32 pwmr;	/* port-write mode */
	u32 pwsr;	/* port-write status */
	u32 epwqbar;	/* extended (upper) queue base address */
	u32 pwqbar;	/* queue base address */
};

/* Hardware layout of one 32-byte outbound message descriptor */
struct rio_tx_desc {
	u32 pad1;
	u32 saddr;	/* source buffer address, low bits carry snoop flag */
	u32 dport;	/* (destid << 16) | mailbox */
	u32 dattr;	/* attributes: priority/port index, EOMI enable */
	u32 pad2;
	u32 pad3;
	u32 dwcnt;	/* transfer size, rounded to a power of two */
	u32 pad4;
};

/* Software shadow of the outbound message descriptor ring */
struct rio_msg_tx_ring {
	void *virt;				/* descriptor ring, CPU view */
	dma_addr_t phys;			/* descriptor ring, device view */
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];	/* per-slot payload buffers */
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;				/* next slot to enqueue into */
	int size;				/* entries in use (power of two) */
	void *dev_id;				/* cookie passed to callbacks */
};

/* Software shadow of the inbound message frame ring */
struct rio_msg_rx_ring {
	void *virt;				/* frame ring, CPU view */
	dma_addr_t phys;			/* frame ring, device view */
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];	/* client-supplied buffers */
	int rx_slot;				/* next slot to install a buffer */
	int size;				/* entries in use (power of two) */
	void *dev_id;				/* cookie passed to callbacks */
};

/* Per-port message unit state: register window, both rings, IRQ lines */
struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

/* Wire format of a received doorbell message in the inbound queue */
struct rio_dbell_msg {
	u16 pad1;
	u16 tid;	/* sender transaction ID */
	u16 sid;	/* source device ID */
	u16 info;	/* 16-bit doorbell payload */
};
/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a register outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		/* Ack the error by writing the same bit back */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
		/* Descriptors are 32 bytes each, so >> 5 maps the dequeue
		 * pointer offset to a ring slot index.
		 */
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
		if (port->outb_msg[0].mcback != NULL) {
			/* -1: this unit does not track a mailbox number */
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1,
					slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}
/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		/* Ack the error by writing the same bit back */
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
				-1,
				-1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}
/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		/* Locate the current message inside the doorbell ring; the
		 * dequeue pointer's low 12 bits are the offset into the
		 * RIO_DBELL_WIN_SIZE (0x1000) ring buffer.
		 */
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
			("RIO: processing doorbell,"
			" sid %2.2x tid %2.2x info %4.4x\n",
			dmsg->sid, dmsg->tid, dmsg->info);

		/* Find the first registered handler whose resource range
		 * covers the received info value, across all ports.
		 */
		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start
						<= dmsg->info)
						&& (dbell->res->end
						>= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid,
						dmsg->info);
					break;
				}
			}
		}
		if (!found) {
			pr_debug
				("RIO: spurious doorbell,"
				" sid %2.2x tid %2.2x info %4.4x\n",
				dmsg->sid, dmsg->tid,
				dmsg->info);
		}
		/* Advance the inbound queue and ack the interrupt */
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}
/*
 * msg_unit_error_handler - clear latched message unit error state
 *
 * Writes the *_CLEAR constants to the logical-layer error register, both
 * inbound/outbound message unit status registers, the doorbell status
 * registers, and the port-write status register. Uses the file-global
 * rio_regs_win/rmu_regs_win windows and the global dbell/pw state.
 */
void msg_unit_error_handler(void)
{

	/*XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}
/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 * Saved messages are queued into pw->pw_fifo and drained later by
 * fsl_pw_dpc() via schedule_work(). Also dispatches port error and
 * message unit error conditions signalled through EPWISR.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	/* Skip port-write processing when this interrupt is not ours */
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif

	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	/* Port error interrupts for ports 0 and 1 */
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}

	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}

	/* Message unit error */
	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}
/*
 * fsl_pw_dpc - deferred port-write processing (workqueue context)
 * @work: embedded work item inside struct fsl_rio_pw
 *
 * Drains the FIFO that fsl_rio_port_write_handler() fills at interrupt
 * time and hands each saved port-write message to the RIO core for
 * every registered master port.
 */
static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	union rio_pw_msg msg_buffer;
	int i;

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
		{
			u32 i;
			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
				if ((i%4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i*4,
						msg_buffer.raw[i]);
				else
					pr_debug(" 0x%08x", msg_buffer.raw[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (pw->mport[i])
				rio_inb_pwrite_handler(pw->mport[i],
						&msg_buffer);
		}
	}
}
  451. /**
  452. * fsl_rio_pw_enable - enable/disable port-write interface init
  453. * @mport: Master port implementing the port write unit
  454. * @enable: 1=enable; 0=disable port-write message handling
  455. */
  456. int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
  457. {
  458. u32 rval;
  459. rval = in_be32(&pw->pw_regs->pwmr);
  460. if (enable)
  461. rval |= RIO_IPWMR_PWE;
  462. else
  463. rval &= ~RIO_IPWMR_PWE;
  464. out_be32(&pw->pw_regs->pwmr, rval);
  465. return 0;
  466. }
  467. /**
  468. * fsl_rio_port_write_init - MPC85xx port write interface init
  469. * @mport: Master port implementing the port write unit
  470. *
  471. * Initializes port write unit hardware and DMA buffer
  472. * ring. Called from fsl_rio_setup(). Returns %0 on success
  473. * or %-ENOMEM on failure.
  474. */
  475. int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
  476. {
  477. int rc = 0;
  478. /* Following configurations require a disabled port write controller */
  479. out_be32(&pw->pw_regs->pwmr,
  480. in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
  481. /* Initialize port write */
  482. pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
  483. RIO_PW_MSG_SIZE,
  484. &pw->port_write_msg.phys, GFP_KERNEL);
  485. if (!pw->port_write_msg.virt) {
  486. pr_err("RIO: unable allocate port write queue\n");
  487. return -ENOMEM;
  488. }
  489. pw->port_write_msg.err_count = 0;
  490. pw->port_write_msg.discard_count = 0;
  491. /* Point dequeue/enqueue pointers at first entry */
  492. out_be32(&pw->pw_regs->epwqbar, 0);
  493. out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
  494. pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
  495. in_be32(&pw->pw_regs->epwqbar),
  496. in_be32(&pw->pw_regs->pwqbar));
  497. /* Clear interrupt status IPWSR */
  498. out_be32(&pw->pw_regs->pwsr,
  499. (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
  500. /* Configure port write controller for snooping enable all reporting,
  501. clear queue full */
  502. out_be32(&pw->pw_regs->pwmr,
  503. RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
  504. /* Hook up port-write handler */
  505. rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
  506. IRQF_SHARED, "port-write", (void *)pw);
  507. if (rc < 0) {
  508. pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
  509. goto err_out;
  510. }
  511. /* Enable Error Interrupt */
  512. out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
  513. INIT_WORK(&pw->pw_work, fsl_pw_dpc);
  514. spin_lock_init(&pw->pw_fifo_lock);
  515. if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
  516. pr_err("FIFO allocation failed\n");
  517. rc = -ENOMEM;
  518. goto err_out_irq;
  519. }
  520. pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
  521. in_be32(&pw->pw_regs->pwmr),
  522. in_be32(&pw->pw_regs->pwsr));
  523. return rc;
  524. err_out_irq:
  525. free_irq(IRQ_RIO_PW(pw), (void *)pw);
  526. err_out:
  527. dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
  528. pw->port_write_msg.virt,
  529. pw->port_write_msg.phys);
  530. return rc;
  531. }
/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 *
 * The whole register sequence runs under fsl_rio_doorbell_lock with
 * interrupts off; the register write order below must be preserved.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	unsigned long flags;

	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
	/* In the serial version silicons, such as MPC8548, MPC8641,
	 * below operations is must be.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	/* Kick off the transfer */
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);
	spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

	return 0;
}
/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	/* Descriptor for the current enqueue slot */
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
		 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);

	/* Hardware limits: at least 8 bytes, at most RIO_MAX_MSG_SIZE */
	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}
  610. /**
  611. * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
  612. * @mport: Master port implementing the outbound message unit
  613. * @dev_id: Device specific pointer to pass on event
  614. * @mbox: Mailbox to open
  615. * @entries: Number of entries in the outbound mailbox ring
  616. *
  617. * Initializes buffer ring, request the outbound message interrupt,
  618. * and enables the outbound message unit. Returns %0 on success and
  619. * %-EINVAL or %-ENOMEM on failure.
  620. */
  621. int
  622. fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
  623. {
  624. int i, j, rc = 0;
  625. struct rio_priv *priv = mport->priv;
  626. struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
  627. if ((entries < RIO_MIN_TX_RING_SIZE) ||
  628. (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
  629. rc = -EINVAL;
  630. goto out;
  631. }
  632. /* Initialize shadow copy ring */
  633. rmu->msg_tx_ring.dev_id = dev_id;
  634. rmu->msg_tx_ring.size = entries;
  635. for (i = 0; i < rmu->msg_tx_ring.size; i++) {
  636. rmu->msg_tx_ring.virt_buffer[i] =
  637. dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
  638. &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
  639. if (!rmu->msg_tx_ring.virt_buffer[i]) {
  640. rc = -ENOMEM;
  641. for (j = 0; j < rmu->msg_tx_ring.size; j++)
  642. if (rmu->msg_tx_ring.virt_buffer[j])
  643. dma_free_coherent(priv->dev,
  644. RIO_MSG_BUFFER_SIZE,
  645. rmu->msg_tx_ring.
  646. virt_buffer[j],
  647. rmu->msg_tx_ring.
  648. phys_buffer[j]);
  649. goto out;
  650. }
  651. }
  652. /* Initialize outbound message descriptor ring */
  653. rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
  654. rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
  655. &rmu->msg_tx_ring.phys, GFP_KERNEL);
  656. if (!rmu->msg_tx_ring.virt) {
  657. rc = -ENOMEM;
  658. goto out_dma;
  659. }
  660. memset(rmu->msg_tx_ring.virt, 0,
  661. rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
  662. rmu->msg_tx_ring.tx_slot = 0;
  663. /* Point dequeue/enqueue pointers at first entry in ring */
  664. out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
  665. out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
  666. /* Configure for snooping */
  667. out_be32(&rmu->msg_regs->osar, 0x00000004);
  668. /* Clear interrupt status */
  669. out_be32(&rmu->msg_regs->osr, 0x000000b3);
  670. /* Hook up outbound message handler */
  671. rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
  672. "msg_tx", (void *)mport);
  673. if (rc < 0)
  674. goto out_irq;
  675. /*
  676. * Configure outbound message unit
  677. * Snooping
  678. * Interrupts (all enabled, except QEIE)
  679. * Chaining mode
  680. * Disable
  681. */
  682. out_be32(&rmu->msg_regs->omr, 0x00100220);
  683. /* Set number of entries */
  684. out_be32(&rmu->msg_regs->omr,
  685. in_be32(&rmu->msg_regs->omr) |
  686. ((get_bitmask_order(entries) - 2) << 12));
  687. /* Now enable the unit */
  688. out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
  689. out:
  690. return rc;
  691. out_irq:
  692. dma_free_coherent(priv->dev,
  693. rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
  694. rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
  695. out_dma:
  696. for (i = 0; i < rmu->msg_tx_ring.size; i++)
  697. dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
  698. rmu->msg_tx_ring.virt_buffer[i],
  699. rmu->msg_tx_ring.phys_buffer[i]);
  700. return rc;
  701. }
  702. /**
  703. * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
  704. * @mport: Master port implementing the outbound message unit
  705. * @mbox: Mailbox to close
  706. *
  707. * Disables the outbound message unit, free all buffers, and
  708. * frees the outbound message interrupt.
  709. */
  710. void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
  711. {
  712. struct rio_priv *priv = mport->priv;
  713. struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
  714. /* Disable inbound message unit */
  715. out_be32(&rmu->msg_regs->omr, 0);
  716. /* Free ring */
  717. dma_free_coherent(priv->dev,
  718. rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
  719. rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
  720. /* Free interrupt */
  721. free_irq(IRQ_RIO_TX(mport), (void *)mport);
  722. }
/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes buffer ring, request the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Ring size must be a power of two within the supported range */
	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		/* Undo the ring allocation on IRQ failure */
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}
/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, free all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring (client buffers in virt_buffer[] belong to callers) */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}
  808. /**
  809. * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
  810. * @mport: Master port implementing the inbound message unit
  811. * @mbox: Inbound mailbox number
  812. * @buf: Buffer to add to inbound queue
  813. *
  814. * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
  815. * %0 on success or %-EINVAL on failure.
  816. */
  817. int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
  818. {
  819. int rc = 0;
  820. struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
  821. pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
  822. rmu->msg_rx_ring.rx_slot);
  823. if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
  824. printk(KERN_ERR
  825. "RIO: error adding inbound buffer %d, buffer exists\n",
  826. rmu->msg_rx_ring.rx_slot);
  827. rc = -EINVAL;
  828. goto out;
  829. }
  830. rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
  831. if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
  832. rmu->msg_rx_ring.rx_slot = 0;
  833. out:
  834. return rc;
  835. }
/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	/* Physical address of the next frame to dequeue. */
	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	/* Translate the frame's physical address into the coherent mapping. */
	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	/* Frames are RIO_MAX_MSG_SIZE apart; the offset yields the slot. */
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	/*
	 * Setting RIO_MSG_IMR_MI presumably advances the hardware dequeue
	 * pointer; note it happens even when no client buffer was available,
	 * so the frame is consumed either way — TODO confirm against the
	 * MPC85xx RMU register spec.
	 */
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
out2:
	return buf;
}
  873. /**
  874. * fsl_rio_doorbell_init - MPC85xx doorbell interface init
  875. * @mport: Master port implementing the inbound doorbell unit
  876. *
  877. * Initializes doorbell unit hardware and inbound DMA buffer
  878. * ring. Called from fsl_rio_setup(). Returns %0 on success
  879. * or %-ENOMEM on failure.
  880. */
  881. int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
  882. {
  883. int rc = 0;
  884. /* Initialize inbound doorbells */
  885. dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
  886. DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
  887. if (!dbell->dbell_ring.virt) {
  888. printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
  889. rc = -ENOMEM;
  890. goto out;
  891. }
  892. /* Point dequeue/enqueue pointers at first entry in ring */
  893. out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
  894. out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
  895. /* Clear interrupt status */
  896. out_be32(&dbell->dbell_regs->dsr, 0x00000091);
  897. /* Hook up doorbell handler */
  898. rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
  899. "dbell_rx", (void *)dbell);
  900. if (rc < 0) {
  901. dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
  902. dbell->dbell_ring.virt, dbell->dbell_ring.phys);
  903. printk(KERN_ERR
  904. "MPC85xx RIO: unable to request inbound doorbell irq");
  905. goto out;
  906. }
  907. /* Configure doorbells for snooping, 512 entries, and enable */
  908. out_be32(&dbell->dbell_regs->dmr, 0x00108161);
  909. out:
  910. return rc;
  911. }
  912. int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
  913. {
  914. struct rio_priv *priv;
  915. struct fsl_rmu *rmu;
  916. u64 msg_start;
  917. const u32 *msg_addr;
  918. int mlen;
  919. int aw;
  920. if (!mport || !mport->priv)
  921. return -EINVAL;
  922. priv = mport->priv;
  923. if (!node) {
  924. dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
  925. priv->dev->of_node);
  926. return -EINVAL;
  927. }
  928. rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
  929. if (!rmu)
  930. return -ENOMEM;
  931. aw = of_n_addr_cells(node);
  932. msg_addr = of_get_property(node, "reg", &mlen);
  933. if (!msg_addr) {
  934. pr_err("%pOF: unable to find 'reg' property of message-unit\n",
  935. node);
  936. kfree(rmu);
  937. return -ENOMEM;
  938. }
  939. msg_start = of_read_number(msg_addr, aw);
  940. rmu->msg_regs = (struct rio_msg_regs *)
  941. (rmu_regs_win + (u32)msg_start);
  942. rmu->txirq = irq_of_parse_and_map(node, 0);
  943. rmu->rxirq = irq_of_parse_and_map(node, 1);
  944. printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
  945. node, rmu->txirq, rmu->rxirq);
  946. priv->rmm_handle = rmu;
  947. rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
  948. rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
  949. rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
  950. return 0;
  951. }