bcm-sba-raid.c

  1. /*
  2. * Copyright (C) 2017 Broadcom
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation version 2.
  7. *
  8. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  9. * kind, whether express or implied; without even the implied warranty
  10. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. /*
  14. * Broadcom SBA RAID Driver
  15. *
  16. * The Broadcom stream buffer accelerator (SBA) provides offloading
  17. * capabilities for RAID operations. The SBA offload engine is accessible
  18. * via a Broadcom SoC specific ring manager. Two or more offload engines
  19. * can share the same Broadcom SoC specific ring manager; because of this,
  20. * the ring manager driver is implemented as a mailbox controller driver
  21. * and the offload engine drivers are implemented as mailbox clients.
  22. *
  23. * Typically, a Broadcom SoC specific ring manager implements a large
  24. * number of hardware rings over one or more SBA hardware devices. By
  25. * design, the internal buffer size of an SBA hardware device is limited,
  26. * but all offload operations supported by SBA can be broken down into
  27. * multiple small-sized requests and executed in parallel on multiple SBA
  28. * hardware devices to achieve high throughput.
  29. *
  30. * The Broadcom SBA RAID driver does not require any register programming
  31. * except for submitting requests to the SBA hardware device via mailbox
  32. * channels. This driver implements a DMA device with one DMA channel using
  33. * a single mailbox channel provided by the Broadcom SoC specific ring
  34. * manager driver. To have more SBA DMA channels, we can create more SBA
  35. * device nodes in the Broadcom SoC specific DTS, based on the number of
  36. * hardware rings supported by the Broadcom SoC ring manager.
  37. */
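/*
 * As a rough, illustrative sketch (the node name, mailbox label and
 * specifier cells below are placeholders; the exact format is defined
 * by the SoC ring manager and SBA device tree bindings), such a device
 * node could look like:
 *
 *	raid0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox ...>;
 *	};
 */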
  38. #include <linux/bitops.h>
  39. #include <linux/debugfs.h>
  40. #include <linux/dma-mapping.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/list.h>
  43. #include <linux/mailbox_client.h>
  44. #include <linux/mailbox/brcm-message.h>
  45. #include <linux/module.h>
  46. #include <linux/of_device.h>
  47. #include <linux/slab.h>
  48. #include <linux/raid/pq.h>
  49. #include "dmaengine.h"
  50. /* ====== Driver macros and defines ===== */
  51. #define SBA_TYPE_SHIFT 48
  52. #define SBA_TYPE_MASK GENMASK(1, 0)
  53. #define SBA_TYPE_A 0x0
  54. #define SBA_TYPE_B 0x2
  55. #define SBA_TYPE_C 0x3
  56. #define SBA_USER_DEF_SHIFT 32
  57. #define SBA_USER_DEF_MASK GENMASK(15, 0)
  58. #define SBA_R_MDATA_SHIFT 24
  59. #define SBA_R_MDATA_MASK GENMASK(7, 0)
  60. #define SBA_C_MDATA_MS_SHIFT 18
  61. #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
  62. #define SBA_INT_SHIFT 17
  63. #define SBA_INT_MASK BIT(0)
  64. #define SBA_RESP_SHIFT 16
  65. #define SBA_RESP_MASK BIT(0)
  66. #define SBA_C_MDATA_SHIFT 8
  67. #define SBA_C_MDATA_MASK GENMASK(7, 0)
  68. #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
  69. #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
  70. #define SBA_C_MDATA_DNUM_SHIFT 5
  71. #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
  72. #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
  73. #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
  74. #define SBA_CMD_SHIFT 0
  75. #define SBA_CMD_MASK GENMASK(3, 0)
  76. #define SBA_CMD_ZERO_BUFFER 0x4
  77. #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
  78. #define SBA_CMD_LOAD_BUFFER 0x9
  79. #define SBA_CMD_XOR 0xa
  80. #define SBA_CMD_GALOIS_XOR 0xb
  81. #define SBA_CMD_WRITE_BUFFER 0xc
  82. #define SBA_CMD_GALOIS 0xe
  83. #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
  84. #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
  85. /* Driver helper macros */
  86. #define to_sba_request(tx) \
  87. container_of(tx, struct sba_request, tx)
  88. #define to_sba_device(dchan) \
  89. container_of(dchan, struct sba_device, dma_chan)
  90. /* ===== Driver data structures ===== */
  91. enum sba_request_flags {
  92. SBA_REQUEST_STATE_FREE = 0x001,
  93. SBA_REQUEST_STATE_ALLOCED = 0x002,
  94. SBA_REQUEST_STATE_PENDING = 0x004,
  95. SBA_REQUEST_STATE_ACTIVE = 0x008,
  96. SBA_REQUEST_STATE_ABORTED = 0x010,
  97. SBA_REQUEST_STATE_MASK = 0x0ff,
  98. SBA_REQUEST_FENCE = 0x100,
  99. };
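/*
 * Request lifecycle (tracked via the per-state lists in struct
 * sba_device): a request moves FREE -> ALLOCED in sba_alloc_request(),
 * ALLOCED -> PENDING in sba_tx_submit(), PENDING -> ACTIVE when it is
 * sent over the mailbox channel, and back to FREE once its completion
 * is received. Requests still active when the channel is cleaned up
 * are marked ABORTED and freed when their completion eventually arrives.
 */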
  100. struct sba_request {
  101. /* Global state */
  102. struct list_head node;
  103. struct sba_device *sba;
  104. u32 flags;
  105. /* Chained requests management */
  106. struct sba_request *first;
  107. struct list_head next;
  108. atomic_t next_pending_count;
  109. /* BRCM message data */
  110. struct brcm_message msg;
  111. struct dma_async_tx_descriptor tx;
  112. /* SBA commands */
  113. struct brcm_sba_command cmds[];
  114. };
  115. enum sba_version {
  116. SBA_VER_1 = 0,
  117. SBA_VER_2
  118. };
  119. struct sba_device {
  120. /* Underlying device */
  121. struct device *dev;
  122. /* DT configuration parameters */
  123. enum sba_version ver;
  124. /* Derived configuration parameters */
  125. u32 max_req;
  126. u32 hw_buf_size;
  127. u32 hw_resp_size;
  128. u32 max_pq_coefs;
  129. u32 max_pq_srcs;
  130. u32 max_cmd_per_req;
  131. u32 max_xor_srcs;
  132. u32 max_resp_pool_size;
  133. u32 max_cmds_pool_size;
  134. /* Mailbox client and mailbox channel */
  135. struct mbox_client client;
  136. struct mbox_chan *mchan;
  137. struct device *mbox_dev;
  138. /* DMA device and DMA channel */
  139. struct dma_device dma_dev;
  140. struct dma_chan dma_chan;
  141. /* DMA channel resources */
  142. void *resp_base;
  143. dma_addr_t resp_dma_base;
  144. void *cmds_base;
  145. dma_addr_t cmds_dma_base;
  146. spinlock_t reqs_lock;
  147. bool reqs_fence;
  148. struct list_head reqs_alloc_list;
  149. struct list_head reqs_pending_list;
  150. struct list_head reqs_active_list;
  151. struct list_head reqs_aborted_list;
  152. struct list_head reqs_free_list;
  153. /* DebugFS directory entries */
  154. struct dentry *root;
  155. struct dentry *stats;
  156. };
  157. /* ====== Command helper routines ===== */
  158. static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
  159. {
  160. cmd &= ~((u64)mask << shift);
  161. cmd |= ((u64)(val & mask) << shift);
  162. return cmd;
  163. }
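/*
 * For example, sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK)
 * places the 2-bit engine type value 0x2 at bits [49:48] of the command
 * word; the sba_fillup_*_msg() helpers below then OR-in the user-defined
 * length, C_MDATA and command fields at their respective shifts in the
 * same way.
 */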
  164. static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
  165. {
  166. return b0 & SBA_C_MDATA_BNUMx_MASK;
  167. }
  168. static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
  169. {
  170. return b0 & SBA_C_MDATA_BNUMx_MASK;
  171. }
  172. static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
  173. {
  174. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  175. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
  176. }
  177. static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
  178. {
  179. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  180. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
  181. ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
  182. }
  183. /* ====== General helper routines ===== */
  184. static struct sba_request *sba_alloc_request(struct sba_device *sba)
  185. {
  186. bool found = false;
  187. unsigned long flags;
  188. struct sba_request *req = NULL;
  189. spin_lock_irqsave(&sba->reqs_lock, flags);
  190. list_for_each_entry(req, &sba->reqs_free_list, node) {
  191. if (async_tx_test_ack(&req->tx)) {
  192. list_move_tail(&req->node, &sba->reqs_alloc_list);
  193. found = true;
  194. break;
  195. }
  196. }
  197. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  198. if (!found) {
  199. /*
  200. * We have no more free requests, so we peek the
  201. * mailbox channel hoping that a few active requests
  202. * have completed, which will create more room
  203. * for new requests.
  204. */
  205. mbox_client_peek_data(sba->mchan);
  206. return NULL;
  207. }
  208. req->flags = SBA_REQUEST_STATE_ALLOCED;
  209. req->first = req;
  210. INIT_LIST_HEAD(&req->next);
  211. atomic_set(&req->next_pending_count, 1);
  212. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  213. async_tx_ack(&req->tx);
  214. return req;
  215. }
  216. /* Note: Must be called with sba->reqs_lock held */
  217. static void _sba_pending_request(struct sba_device *sba,
  218. struct sba_request *req)
  219. {
  220. lockdep_assert_held(&sba->reqs_lock);
  221. req->flags &= ~SBA_REQUEST_STATE_MASK;
  222. req->flags |= SBA_REQUEST_STATE_PENDING;
  223. list_move_tail(&req->node, &sba->reqs_pending_list);
  224. if (list_empty(&sba->reqs_active_list))
  225. sba->reqs_fence = false;
  226. }
  227. /* Note: Must be called with sba->reqs_lock held */
  228. static bool _sba_active_request(struct sba_device *sba,
  229. struct sba_request *req)
  230. {
  231. lockdep_assert_held(&sba->reqs_lock);
  232. if (list_empty(&sba->reqs_active_list))
  233. sba->reqs_fence = false;
  234. if (sba->reqs_fence)
  235. return false;
  236. req->flags &= ~SBA_REQUEST_STATE_MASK;
  237. req->flags |= SBA_REQUEST_STATE_ACTIVE;
  238. list_move_tail(&req->node, &sba->reqs_active_list);
  239. if (req->flags & SBA_REQUEST_FENCE)
  240. sba->reqs_fence = true;
  241. return true;
  242. }
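/*
 * Fence handling: once a request carrying SBA_REQUEST_FENCE becomes
 * active, reqs_fence is set and no further pending requests are made
 * active; reqs_fence is cleared again when the active list is seen to
 * be empty.
 */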
  243. /* Note: Must be called with sba->reqs_lock held */
  244. static void _sba_abort_request(struct sba_device *sba,
  245. struct sba_request *req)
  246. {
  247. lockdep_assert_held(&sba->reqs_lock);
  248. req->flags &= ~SBA_REQUEST_STATE_MASK;
  249. req->flags |= SBA_REQUEST_STATE_ABORTED;
  250. list_move_tail(&req->node, &sba->reqs_aborted_list);
  251. if (list_empty(&sba->reqs_active_list))
  252. sba->reqs_fence = false;
  253. }
  254. /* Note: Must be called with sba->reqs_lock held */
  255. static void _sba_free_request(struct sba_device *sba,
  256. struct sba_request *req)
  257. {
  258. lockdep_assert_held(&sba->reqs_lock);
  259. req->flags &= ~SBA_REQUEST_STATE_MASK;
  260. req->flags |= SBA_REQUEST_STATE_FREE;
  261. list_move_tail(&req->node, &sba->reqs_free_list);
  262. if (list_empty(&sba->reqs_active_list))
  263. sba->reqs_fence = false;
  264. }
  265. static void sba_free_chained_requests(struct sba_request *req)
  266. {
  267. unsigned long flags;
  268. struct sba_request *nreq;
  269. struct sba_device *sba = req->sba;
  270. spin_lock_irqsave(&sba->reqs_lock, flags);
  271. _sba_free_request(sba, req);
  272. list_for_each_entry(nreq, &req->next, next)
  273. _sba_free_request(sba, nreq);
  274. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  275. }
  276. static void sba_chain_request(struct sba_request *first,
  277. struct sba_request *req)
  278. {
  279. unsigned long flags;
  280. struct sba_device *sba = req->sba;
  281. spin_lock_irqsave(&sba->reqs_lock, flags);
  282. list_add_tail(&req->next, &first->next);
  283. req->first = first;
  284. atomic_inc(&first->next_pending_count);
  285. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  286. }
  287. static void sba_cleanup_nonpending_requests(struct sba_device *sba)
  288. {
  289. unsigned long flags;
  290. struct sba_request *req, *req1;
  291. spin_lock_irqsave(&sba->reqs_lock, flags);
  292. /* Free up all allocated requests */
  293. list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
  294. _sba_free_request(sba, req);
  295. /* Set all active requests as aborted */
  296. list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
  297. _sba_abort_request(sba, req);
  298. /*
  299. * Note: We expect that aborted requests will eventually be
  300. * freed by sba_receive_message()
  301. */
  302. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  303. }
  304. static void sba_cleanup_pending_requests(struct sba_device *sba)
  305. {
  306. unsigned long flags;
  307. struct sba_request *req, *req1;
  308. spin_lock_irqsave(&sba->reqs_lock, flags);
  310. /* Free up all pending requests */
  310. list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
  311. _sba_free_request(sba, req);
  312. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  313. }
  314. static int sba_send_mbox_request(struct sba_device *sba,
  315. struct sba_request *req)
  316. {
  317. int ret = 0;
  318. /* Send message for the request */
  319. req->msg.error = 0;
  320. ret = mbox_send_message(sba->mchan, &req->msg);
  321. if (ret < 0) {
  322. dev_err(sba->dev, "send message failed with error %d", ret);
  323. return ret;
  324. }
  325. /* Check error returned by mailbox controller */
  326. ret = req->msg.error;
  327. if (ret < 0) {
  328. dev_err(sba->dev, "message error %d", ret);
  329. }
  330. /* Signal txdone for mailbox channel */
  331. mbox_client_txdone(sba->mchan, ret);
  332. return ret;
  333. }
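/*
 * The mailbox client is configured with knows_txdone set (see
 * sba_probe()), so transmit completion is signalled here explicitly
 * via mbox_client_txdone() instead of being reported by the mailbox
 * controller.
 */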
  334. /* Note: Must be called with sba->reqs_lock held */
  335. static void _sba_process_pending_requests(struct sba_device *sba)
  336. {
  337. int ret;
  338. u32 count;
  339. struct sba_request *req;
  340. /* Process a few pending requests */
  341. count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
  342. while (!list_empty(&sba->reqs_pending_list) && count) {
  343. /* Get the first pending request */
  344. req = list_first_entry(&sba->reqs_pending_list,
  345. struct sba_request, node);
  346. /* Try to make request active */
  347. if (!_sba_active_request(sba, req))
  348. break;
  349. /* Send request to mailbox channel */
  350. ret = sba_send_mbox_request(sba, req);
  351. if (ret < 0) {
  352. _sba_pending_request(sba, req);
  353. break;
  354. }
  355. count--;
  356. }
  357. }
  358. static void sba_process_received_request(struct sba_device *sba,
  359. struct sba_request *req)
  360. {
  361. unsigned long flags;
  362. struct dma_async_tx_descriptor *tx;
  363. struct sba_request *nreq, *first = req->first;
  364. /* Process only after all chained requests are received */
  365. if (!atomic_dec_return(&first->next_pending_count)) {
  366. tx = &first->tx;
  367. WARN_ON(tx->cookie < 0);
  368. if (tx->cookie > 0) {
  369. spin_lock_irqsave(&sba->reqs_lock, flags);
  370. dma_cookie_complete(tx);
  371. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  372. dmaengine_desc_get_callback_invoke(tx, NULL);
  373. dma_descriptor_unmap(tx);
  374. tx->callback = NULL;
  375. tx->callback_result = NULL;
  376. }
  377. dma_run_dependencies(tx);
  378. spin_lock_irqsave(&sba->reqs_lock, flags);
  379. /* Free all requests chained to first request */
  380. list_for_each_entry(nreq, &first->next, next)
  381. _sba_free_request(sba, nreq);
  382. INIT_LIST_HEAD(&first->next);
  383. /* Free the first request */
  384. _sba_free_request(sba, first);
  385. /* Process pending requests */
  386. _sba_process_pending_requests(sba);
  387. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  388. }
  389. }
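/*
 * Each completion received from the mailbox decrements the first
 * request's next_pending_count; only when it reaches zero is the
 * cookie completed, the async_tx callback invoked and the whole
 * chain returned to the free list.
 */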
  390. static void sba_write_stats_in_seqfile(struct sba_device *sba,
  391. struct seq_file *file)
  392. {
  393. unsigned long flags;
  394. struct sba_request *req;
  395. u32 free_count = 0, alloced_count = 0;
  396. u32 pending_count = 0, active_count = 0, aborted_count = 0;
  397. spin_lock_irqsave(&sba->reqs_lock, flags);
  398. list_for_each_entry(req, &sba->reqs_free_list, node)
  399. if (async_tx_test_ack(&req->tx))
  400. free_count++;
  401. list_for_each_entry(req, &sba->reqs_alloc_list, node)
  402. alloced_count++;
  403. list_for_each_entry(req, &sba->reqs_pending_list, node)
  404. pending_count++;
  405. list_for_each_entry(req, &sba->reqs_active_list, node)
  406. active_count++;
  407. list_for_each_entry(req, &sba->reqs_aborted_list, node)
  408. aborted_count++;
  409. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  410. seq_printf(file, "maximum requests = %d\n", sba->max_req);
  411. seq_printf(file, "free requests = %d\n", free_count);
  412. seq_printf(file, "alloced requests = %d\n", alloced_count);
  413. seq_printf(file, "pending requests = %d\n", pending_count);
  414. seq_printf(file, "active requests = %d\n", active_count);
  415. seq_printf(file, "aborted requests = %d\n", aborted_count);
  416. }
  417. /* ====== DMAENGINE callbacks ===== */
  418. static void sba_free_chan_resources(struct dma_chan *dchan)
  419. {
  420. /*
  421. * Channel resources are pre-allocated, so we just free up
  422. * whatever we can so that the pre-allocated channel
  423. * resources can be re-used next time.
  424. */
  425. sba_cleanup_nonpending_requests(to_sba_device(dchan));
  426. }
  427. static int sba_device_terminate_all(struct dma_chan *dchan)
  428. {
  429. /* Cleanup all pending requests */
  430. sba_cleanup_pending_requests(to_sba_device(dchan));
  431. return 0;
  432. }
  433. static void sba_issue_pending(struct dma_chan *dchan)
  434. {
  435. unsigned long flags;
  436. struct sba_device *sba = to_sba_device(dchan);
  437. /* Process pending requests */
  438. spin_lock_irqsave(&sba->reqs_lock, flags);
  439. _sba_process_pending_requests(sba);
  440. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  441. }
  442. static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
  443. {
  444. unsigned long flags;
  445. dma_cookie_t cookie;
  446. struct sba_device *sba;
  447. struct sba_request *req, *nreq;
  448. if (unlikely(!tx))
  449. return -EINVAL;
  450. sba = to_sba_device(tx->chan);
  451. req = to_sba_request(tx);
  452. /* Assign cookie and mark all chained requests pending */
  453. spin_lock_irqsave(&sba->reqs_lock, flags);
  454. cookie = dma_cookie_assign(tx);
  455. _sba_pending_request(sba, req);
  456. list_for_each_entry(nreq, &req->next, next)
  457. _sba_pending_request(sba, nreq);
  458. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  459. return cookie;
  460. }
  461. static enum dma_status sba_tx_status(struct dma_chan *dchan,
  462. dma_cookie_t cookie,
  463. struct dma_tx_state *txstate)
  464. {
  465. enum dma_status ret;
  466. struct sba_device *sba = to_sba_device(dchan);
  467. ret = dma_cookie_status(dchan, cookie, txstate);
  468. if (ret == DMA_COMPLETE)
  469. return ret;
  470. mbox_client_peek_data(sba->mchan);
  471. return dma_cookie_status(dchan, cookie, txstate);
  472. }
  473. static void sba_fillup_interrupt_msg(struct sba_request *req,
  474. struct brcm_sba_command *cmds,
  475. struct brcm_message *msg)
  476. {
  477. u64 cmd;
  478. u32 c_mdata;
  479. dma_addr_t resp_dma = req->tx.phys;
  480. struct brcm_sba_command *cmdsp = cmds;
  481. /* Type-B command to load dummy data into buf0 */
  482. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  483. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  484. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  485. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  486. c_mdata = sba_cmd_load_c_mdata(0);
  487. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  488. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  489. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  490. SBA_CMD_SHIFT, SBA_CMD_MASK);
  491. cmdsp->cmd = cmd;
  492. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  493. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  494. cmdsp->data = resp_dma;
  495. cmdsp->data_len = req->sba->hw_resp_size;
  496. cmdsp++;
  497. /* Type-A command to write buf0 to dummy location */
  498. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  499. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  500. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  501. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  502. cmd = sba_cmd_enc(cmd, 0x1,
  503. SBA_RESP_SHIFT, SBA_RESP_MASK);
  504. c_mdata = sba_cmd_write_c_mdata(0);
  505. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  506. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  507. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  508. SBA_CMD_SHIFT, SBA_CMD_MASK);
  509. cmdsp->cmd = cmd;
  510. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  511. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  512. if (req->sba->hw_resp_size) {
  513. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  514. cmdsp->resp = resp_dma;
  515. cmdsp->resp_len = req->sba->hw_resp_size;
  516. }
  517. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  518. cmdsp->data = resp_dma;
  519. cmdsp->data_len = req->sba->hw_resp_size;
  520. cmdsp++;
  521. /* Fillup brcm_message */
  522. msg->type = BRCM_MESSAGE_SBA;
  523. msg->sba.cmds = cmds;
  524. msg->sba.cmds_count = cmdsp - cmds;
  525. msg->ctx = req;
  526. msg->error = 0;
  527. }
  528. static struct dma_async_tx_descriptor *
  529. sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
  530. {
  531. struct sba_request *req = NULL;
  532. struct sba_device *sba = to_sba_device(dchan);
  533. /* Alloc new request */
  534. req = sba_alloc_request(sba);
  535. if (!req)
  536. return NULL;
  537. /*
  538. * Force fence so that no requests are submitted
  539. * until DMA callback for this request is invoked.
  540. */
  541. req->flags |= SBA_REQUEST_FENCE;
  542. /* Fillup request message */
  543. sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
  544. /* Init async_tx descriptor */
  545. req->tx.flags = flags;
  546. req->tx.cookie = -EBUSY;
  547. return &req->tx;
  548. }
  549. static void sba_fillup_memcpy_msg(struct sba_request *req,
  550. struct brcm_sba_command *cmds,
  551. struct brcm_message *msg,
  552. dma_addr_t msg_offset, size_t msg_len,
  553. dma_addr_t dst, dma_addr_t src)
  554. {
  555. u64 cmd;
  556. u32 c_mdata;
  557. dma_addr_t resp_dma = req->tx.phys;
  558. struct brcm_sba_command *cmdsp = cmds;
  559. /* Type-B command to load data into buf0 */
  560. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  561. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  562. cmd = sba_cmd_enc(cmd, msg_len,
  563. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  564. c_mdata = sba_cmd_load_c_mdata(0);
  565. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  566. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  567. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  568. SBA_CMD_SHIFT, SBA_CMD_MASK);
  569. cmdsp->cmd = cmd;
  570. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  571. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  572. cmdsp->data = src + msg_offset;
  573. cmdsp->data_len = msg_len;
  574. cmdsp++;
  575. /* Type-A command to write buf0 */
  576. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  577. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  578. cmd = sba_cmd_enc(cmd, msg_len,
  579. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  580. cmd = sba_cmd_enc(cmd, 0x1,
  581. SBA_RESP_SHIFT, SBA_RESP_MASK);
  582. c_mdata = sba_cmd_write_c_mdata(0);
  583. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  584. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  585. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  586. SBA_CMD_SHIFT, SBA_CMD_MASK);
  587. cmdsp->cmd = cmd;
  588. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  589. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  590. if (req->sba->hw_resp_size) {
  591. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  592. cmdsp->resp = resp_dma;
  593. cmdsp->resp_len = req->sba->hw_resp_size;
  594. }
  595. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  596. cmdsp->data = dst + msg_offset;
  597. cmdsp->data_len = msg_len;
  598. cmdsp++;
  599. /* Fillup brcm_message */
  600. msg->type = BRCM_MESSAGE_SBA;
  601. msg->sba.cmds = cmds;
  602. msg->sba.cmds_count = cmdsp - cmds;
  603. msg->ctx = req;
  604. msg->error = 0;
  605. }
  606. static struct sba_request *
  607. sba_prep_dma_memcpy_req(struct sba_device *sba,
  608. dma_addr_t off, dma_addr_t dst, dma_addr_t src,
  609. size_t len, unsigned long flags)
  610. {
  611. struct sba_request *req = NULL;
  612. /* Alloc new request */
  613. req = sba_alloc_request(sba);
  614. if (!req)
  615. return NULL;
  616. if (flags & DMA_PREP_FENCE)
  617. req->flags |= SBA_REQUEST_FENCE;
  618. /* Fillup request message */
  619. sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
  620. off, len, dst, src);
  621. /* Init async_tx descriptor */
  622. req->tx.flags = flags;
  623. req->tx.cookie = -EBUSY;
  624. return req;
  625. }
  626. static struct dma_async_tx_descriptor *
  627. sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
  628. size_t len, unsigned long flags)
  629. {
  630. size_t req_len;
  631. dma_addr_t off = 0;
  632. struct sba_device *sba = to_sba_device(dchan);
  633. struct sba_request *first = NULL, *req;
  634. /* Create chained requests where each request is up to hw_buf_size */
  635. while (len) {
  636. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  637. req = sba_prep_dma_memcpy_req(sba, off, dst, src,
  638. req_len, flags);
  639. if (!req) {
  640. if (first)
  641. sba_free_chained_requests(first);
  642. return NULL;
  643. }
  644. if (first)
  645. sba_chain_request(first, req);
  646. else
  647. first = req;
  648. off += req_len;
  649. len -= req_len;
  650. }
  651. return (first) ? &first->tx : NULL;
  652. }
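/*
 * For example, with hw_buf_size == 4096 a 10240 byte copy is split into
 * three chained requests of 4096, 4096 and 2048 bytes, all completed
 * through a single async_tx descriptor (the first request's tx).
 */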
  653. static void sba_fillup_xor_msg(struct sba_request *req,
  654. struct brcm_sba_command *cmds,
  655. struct brcm_message *msg,
  656. dma_addr_t msg_offset, size_t msg_len,
  657. dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
  658. {
  659. u64 cmd;
  660. u32 c_mdata;
  661. unsigned int i;
  662. dma_addr_t resp_dma = req->tx.phys;
  663. struct brcm_sba_command *cmdsp = cmds;
  664. /* Type-B command to load data into buf0 */
  665. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  666. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  667. cmd = sba_cmd_enc(cmd, msg_len,
  668. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  669. c_mdata = sba_cmd_load_c_mdata(0);
  670. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  671. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  672. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  673. SBA_CMD_SHIFT, SBA_CMD_MASK);
  674. cmdsp->cmd = cmd;
  675. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  676. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  677. cmdsp->data = src[0] + msg_offset;
  678. cmdsp->data_len = msg_len;
  679. cmdsp++;
  680. /* Type-B commands to xor data with buf0 and put it back in buf0 */
  681. for (i = 1; i < src_cnt; i++) {
  682. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  683. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  684. cmd = sba_cmd_enc(cmd, msg_len,
  685. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  686. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  687. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  688. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  689. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  690. SBA_CMD_SHIFT, SBA_CMD_MASK);
  691. cmdsp->cmd = cmd;
  692. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  693. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  694. cmdsp->data = src[i] + msg_offset;
  695. cmdsp->data_len = msg_len;
  696. cmdsp++;
  697. }
  698. /* Type-A command to write buf0 */
  699. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  700. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  701. cmd = sba_cmd_enc(cmd, msg_len,
  702. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  703. cmd = sba_cmd_enc(cmd, 0x1,
  704. SBA_RESP_SHIFT, SBA_RESP_MASK);
  705. c_mdata = sba_cmd_write_c_mdata(0);
  706. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  707. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  708. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  709. SBA_CMD_SHIFT, SBA_CMD_MASK);
  710. cmdsp->cmd = cmd;
  711. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  712. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  713. if (req->sba->hw_resp_size) {
  714. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  715. cmdsp->resp = resp_dma;
  716. cmdsp->resp_len = req->sba->hw_resp_size;
  717. }
  718. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  719. cmdsp->data = dst + msg_offset;
  720. cmdsp->data_len = msg_len;
  721. cmdsp++;
  722. /* Fillup brcm_message */
  723. msg->type = BRCM_MESSAGE_SBA;
  724. msg->sba.cmds = cmds;
  725. msg->sba.cmds_count = cmdsp - cmds;
  726. msg->ctx = req;
  727. msg->error = 0;
  728. }
  729. static struct sba_request *
  730. sba_prep_dma_xor_req(struct sba_device *sba,
  731. dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
  732. u32 src_cnt, size_t len, unsigned long flags)
  733. {
  734. struct sba_request *req = NULL;
  735. /* Alloc new request */
  736. req = sba_alloc_request(sba);
  737. if (!req)
  738. return NULL;
  739. if (flags & DMA_PREP_FENCE)
  740. req->flags |= SBA_REQUEST_FENCE;
  741. /* Fillup request message */
  742. sba_fillup_xor_msg(req, req->cmds, &req->msg,
  743. off, len, dst, src, src_cnt);
  744. /* Init async_tx descriptor */
  745. req->tx.flags = flags;
  746. req->tx.cookie = -EBUSY;
  747. return req;
  748. }
  749. static struct dma_async_tx_descriptor *
  750. sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
  751. u32 src_cnt, size_t len, unsigned long flags)
  752. {
  753. size_t req_len;
  754. dma_addr_t off = 0;
  755. struct sba_device *sba = to_sba_device(dchan);
  756. struct sba_request *first = NULL, *req;
  757. /* Sanity checks */
  758. if (unlikely(src_cnt > sba->max_xor_srcs))
  759. return NULL;
  760. /* Create chained requests where each request is up to hw_buf_size */
  761. while (len) {
  762. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  763. req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
  764. req_len, flags);
  765. if (!req) {
  766. if (first)
  767. sba_free_chained_requests(first);
  768. return NULL;
  769. }
  770. if (first)
  771. sba_chain_request(first, req);
  772. else
  773. first = req;
  774. off += req_len;
  775. len -= req_len;
  776. }
  777. return (first) ? &first->tx : NULL;
  778. }
  779. static void sba_fillup_pq_msg(struct sba_request *req,
  780. bool pq_continue,
  781. struct brcm_sba_command *cmds,
  782. struct brcm_message *msg,
  783. dma_addr_t msg_offset, size_t msg_len,
  784. dma_addr_t *dst_p, dma_addr_t *dst_q,
  785. const u8 *scf, dma_addr_t *src, u32 src_cnt)
  786. {
  787. u64 cmd;
  788. u32 c_mdata;
  789. unsigned int i;
  790. dma_addr_t resp_dma = req->tx.phys;
  791. struct brcm_sba_command *cmdsp = cmds;
  792. if (pq_continue) {
  793. /* Type-B command to load old P into buf0 */
  794. if (dst_p) {
  795. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  796. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  797. cmd = sba_cmd_enc(cmd, msg_len,
  798. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  799. c_mdata = sba_cmd_load_c_mdata(0);
  800. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  801. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  802. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  803. SBA_CMD_SHIFT, SBA_CMD_MASK);
  804. cmdsp->cmd = cmd;
  805. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  806. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  807. cmdsp->data = *dst_p + msg_offset;
  808. cmdsp->data_len = msg_len;
  809. cmdsp++;
  810. }
  811. /* Type-B command to load old Q into buf1 */
  812. if (dst_q) {
  813. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  814. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  815. cmd = sba_cmd_enc(cmd, msg_len,
  816. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  817. c_mdata = sba_cmd_load_c_mdata(1);
  818. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  819. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  820. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  821. SBA_CMD_SHIFT, SBA_CMD_MASK);
  822. cmdsp->cmd = cmd;
  823. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  824. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  825. cmdsp->data = *dst_q + msg_offset;
  826. cmdsp->data_len = msg_len;
  827. cmdsp++;
  828. }
  829. } else {
  830. /* Type-A command to zero all buffers */
  831. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  832. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  833. cmd = sba_cmd_enc(cmd, msg_len,
  834. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  835. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  836. SBA_CMD_SHIFT, SBA_CMD_MASK);
  837. cmdsp->cmd = cmd;
  838. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  839. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  840. cmdsp++;
  841. }
  842. /* Type-B commands to generate P into buf0 and Q into buf1 */
  843. for (i = 0; i < src_cnt; i++) {
  844. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  845. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  846. cmd = sba_cmd_enc(cmd, msg_len,
  847. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  848. c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
  849. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  850. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  851. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  852. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  853. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
  854. SBA_CMD_SHIFT, SBA_CMD_MASK);
  855. cmdsp->cmd = cmd;
  856. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  857. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  858. cmdsp->data = src[i] + msg_offset;
  859. cmdsp->data_len = msg_len;
  860. cmdsp++;
  861. }
  862. /* Type-A command to write buf0 */
  863. if (dst_p) {
  864. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  865. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  866. cmd = sba_cmd_enc(cmd, msg_len,
  867. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  868. cmd = sba_cmd_enc(cmd, 0x1,
  869. SBA_RESP_SHIFT, SBA_RESP_MASK);
  870. c_mdata = sba_cmd_write_c_mdata(0);
  871. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  872. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  873. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  874. SBA_CMD_SHIFT, SBA_CMD_MASK);
  875. cmdsp->cmd = cmd;
  876. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  877. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  878. if (req->sba->hw_resp_size) {
  879. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  880. cmdsp->resp = resp_dma;
  881. cmdsp->resp_len = req->sba->hw_resp_size;
  882. }
  883. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  884. cmdsp->data = *dst_p + msg_offset;
  885. cmdsp->data_len = msg_len;
  886. cmdsp++;
  887. }
  888. /* Type-A command to write buf1 */
  889. if (dst_q) {
  890. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  891. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  892. cmd = sba_cmd_enc(cmd, msg_len,
  893. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  894. cmd = sba_cmd_enc(cmd, 0x1,
  895. SBA_RESP_SHIFT, SBA_RESP_MASK);
  896. c_mdata = sba_cmd_write_c_mdata(1);
  897. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  898. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  899. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  900. SBA_CMD_SHIFT, SBA_CMD_MASK);
  901. cmdsp->cmd = cmd;
  902. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  903. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  904. if (req->sba->hw_resp_size) {
  905. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  906. cmdsp->resp = resp_dma;
  907. cmdsp->resp_len = req->sba->hw_resp_size;
  908. }
  909. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  910. cmdsp->data = *dst_q + msg_offset;
  911. cmdsp->data_len = msg_len;
  912. cmdsp++;
  913. }
  914. /* Fillup brcm_message */
  915. msg->type = BRCM_MESSAGE_SBA;
  916. msg->sba.cmds = cmds;
  917. msg->sba.cmds_count = cmdsp - cmds;
  918. msg->ctx = req;
  919. msg->error = 0;
  920. }
  921. static struct sba_request *
  922. sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
  923. dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
  924. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  925. {
  926. struct sba_request *req = NULL;
  927. /* Alloc new request */
  928. req = sba_alloc_request(sba);
  929. if (!req)
  930. return NULL;
  931. if (flags & DMA_PREP_FENCE)
  932. req->flags |= SBA_REQUEST_FENCE;
  933. /* Fillup request messages */
  934. sba_fillup_pq_msg(req, dmaf_continue(flags),
  935. req->cmds, &req->msg,
  936. off, len, dst_p, dst_q, scf, src, src_cnt);
  937. /* Init async_tx descriptor */
  938. req->tx.flags = flags;
  939. req->tx.cookie = -EBUSY;
  940. return req;
  941. }
  942. static void sba_fillup_pq_single_msg(struct sba_request *req,
  943. bool pq_continue,
  944. struct brcm_sba_command *cmds,
  945. struct brcm_message *msg,
  946. dma_addr_t msg_offset, size_t msg_len,
  947. dma_addr_t *dst_p, dma_addr_t *dst_q,
  948. dma_addr_t src, u8 scf)
  949. {
  950. u64 cmd;
  951. u32 c_mdata;
  952. u8 pos, dpos = raid6_gflog[scf];
  953. dma_addr_t resp_dma = req->tx.phys;
  954. struct brcm_sba_command *cmdsp = cmds;
  955. if (!dst_p)
  956. goto skip_p;
  957. if (pq_continue) {
  958. /* Type-B command to load old P into buf0 */
  959. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  960. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  961. cmd = sba_cmd_enc(cmd, msg_len,
  962. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  963. c_mdata = sba_cmd_load_c_mdata(0);
  964. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  965. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  966. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  967. SBA_CMD_SHIFT, SBA_CMD_MASK);
  968. cmdsp->cmd = cmd;
  969. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  970. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  971. cmdsp->data = *dst_p + msg_offset;
  972. cmdsp->data_len = msg_len;
  973. cmdsp++;
  974. /*
  975. * Type-B commands to xor data with buf0 and put it
  976. * back in buf0
  977. */
  978. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  979. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  980. cmd = sba_cmd_enc(cmd, msg_len,
  981. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  982. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  983. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  984. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  985. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  986. SBA_CMD_SHIFT, SBA_CMD_MASK);
  987. cmdsp->cmd = cmd;
  988. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  989. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  990. cmdsp->data = src + msg_offset;
  991. cmdsp->data_len = msg_len;
  992. cmdsp++;
  993. } else {
  994. /* Type-B command to load old P into buf0 */
  995. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  996. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  997. cmd = sba_cmd_enc(cmd, msg_len,
  998. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  999. c_mdata = sba_cmd_load_c_mdata(0);
  1000. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1001. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1002. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  1003. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1004. cmdsp->cmd = cmd;
  1005. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1006. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1007. cmdsp->data = src + msg_offset;
  1008. cmdsp->data_len = msg_len;
  1009. cmdsp++;
  1010. }
  1011. /* Type-A command to write buf0 */
  1012. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1013. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1014. cmd = sba_cmd_enc(cmd, msg_len,
  1015. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1016. cmd = sba_cmd_enc(cmd, 0x1,
  1017. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1018. c_mdata = sba_cmd_write_c_mdata(0);
  1019. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1020. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1021. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1022. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1023. cmdsp->cmd = cmd;
  1024. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1025. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1026. if (req->sba->hw_resp_size) {
  1027. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1028. cmdsp->resp = resp_dma;
  1029. cmdsp->resp_len = req->sba->hw_resp_size;
  1030. }
  1031. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1032. cmdsp->data = *dst_p + msg_offset;
  1033. cmdsp->data_len = msg_len;
  1034. cmdsp++;
  1035. skip_p:
  1036. if (!dst_q)
  1037. goto skip_q;
  1038. /* Type-A command to zero all buffers */
  1039. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1040. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1041. cmd = sba_cmd_enc(cmd, msg_len,
  1042. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1043. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  1044. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1045. cmdsp->cmd = cmd;
  1046. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1047. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1048. cmdsp++;
  1049. if (dpos == 255)
  1050. goto skip_q_computation;
  1051. pos = (dpos < req->sba->max_pq_coefs) ?
  1052. dpos : (req->sba->max_pq_coefs - 1);
  1053. /*
  1054. * Type-B command to generate initial Q from data
  1055. * and store output into buf0
  1056. */
  1057. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1058. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1059. cmd = sba_cmd_enc(cmd, msg_len,
  1060. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1061. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
  1062. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1063. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1064. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1065. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1066. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1067. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1068. cmdsp->cmd = cmd;
  1069. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1070. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1071. cmdsp->data = src + msg_offset;
  1072. cmdsp->data_len = msg_len;
  1073. cmdsp++;
  1074. dpos -= pos;
  1075. /* Multiple Type-A commands to generate final Q */
  1076. while (dpos) {
  1077. pos = (dpos < req->sba->max_pq_coefs) ?
  1078. dpos : (req->sba->max_pq_coefs - 1);
  1079. /*
  1080. * Type-A command to generate Q from buf0 and
  1081. * buf1 and store the result in buf0
  1082. */
  1083. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1084. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1085. cmd = sba_cmd_enc(cmd, msg_len,
  1086. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1087. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
  1088. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1089. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1090. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1091. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1092. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1093. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1094. cmdsp->cmd = cmd;
  1095. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1096. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1097. cmdsp++;
  1098. dpos -= pos;
  1099. }
  1100. skip_q_computation:
  1101. if (pq_continue) {
  1102. /*
  1103. * Type-B command to XOR previous output with
  1104. * buf0 and write it into buf0
  1105. */
  1106. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1107. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1108. cmd = sba_cmd_enc(cmd, msg_len,
  1109. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1110. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  1111. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1112. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1113. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  1114. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1115. cmdsp->cmd = cmd;
  1116. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1117. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1118. cmdsp->data = *dst_q + msg_offset;
  1119. cmdsp->data_len = msg_len;
  1120. cmdsp++;
  1121. }
  1122. /* Type-A command to write buf0 */
  1123. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1124. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1125. cmd = sba_cmd_enc(cmd, msg_len,
  1126. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1127. cmd = sba_cmd_enc(cmd, 0x1,
  1128. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1129. c_mdata = sba_cmd_write_c_mdata(0);
  1130. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1131. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1132. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1133. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1134. cmdsp->cmd = cmd;
  1135. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1136. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1137. if (req->sba->hw_resp_size) {
  1138. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1139. cmdsp->resp = resp_dma;
  1140. cmdsp->resp_len = req->sba->hw_resp_size;
  1141. }
  1142. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1143. cmdsp->data = *dst_q + msg_offset;
  1144. cmdsp->data_len = msg_len;
  1145. cmdsp++;
  1146. skip_q:
  1147. /* Fillup brcm_message */
  1148. msg->type = BRCM_MESSAGE_SBA;
  1149. msg->sba.cmds = cmds;
  1150. msg->sba.cmds_count = cmdsp - cmds;
  1151. msg->ctx = req;
  1152. msg->error = 0;
  1153. }
  1154. static struct sba_request *
  1155. sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
  1156. dma_addr_t *dst_p, dma_addr_t *dst_q,
  1157. dma_addr_t src, u8 scf, size_t len,
  1158. unsigned long flags)
  1159. {
  1160. struct sba_request *req = NULL;
  1161. /* Alloc new request */
  1162. req = sba_alloc_request(sba);
  1163. if (!req)
  1164. return NULL;
  1165. if (flags & DMA_PREP_FENCE)
  1166. req->flags |= SBA_REQUEST_FENCE;
  1167. /* Fillup request messages */
  1168. sba_fillup_pq_single_msg(req, dmaf_continue(flags),
  1169. req->cmds, &req->msg, off, len,
  1170. dst_p, dst_q, src, scf);
  1171. /* Init async_tx descriptor */
  1172. req->tx.flags = flags;
  1173. req->tx.cookie = -EBUSY;
  1174. return req;
  1175. }
  1176. static struct dma_async_tx_descriptor *
  1177. sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
  1178. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  1179. {
  1180. u32 i, dst_q_index;
  1181. size_t req_len;
  1182. bool slow = false;
  1183. dma_addr_t off = 0;
  1184. dma_addr_t *dst_p = NULL, *dst_q = NULL;
  1185. struct sba_device *sba = to_sba_device(dchan);
  1186. struct sba_request *first = NULL, *req;
  1187. /* Sanity checks */
  1188. if (unlikely(src_cnt > sba->max_pq_srcs))
  1189. return NULL;
  1190. for (i = 0; i < src_cnt; i++)
  1191. if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
  1192. slow = true;
  1193. /* Figure-out P and Q destination addresses */
  1194. if (!(flags & DMA_PREP_PQ_DISABLE_P))
  1195. dst_p = &dst[0];
  1196. if (!(flags & DMA_PREP_PQ_DISABLE_Q))
  1197. dst_q = &dst[1];
  1198. /* Create chained requests where each request is up to hw_buf_size */
  1199. while (len) {
  1200. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  1201. if (slow) {
  1202. dst_q_index = src_cnt;
  1203. if (dst_q) {
  1204. for (i = 0; i < src_cnt; i++) {
  1205. if (*dst_q == src[i]) {
  1206. dst_q_index = i;
  1207. break;
  1208. }
  1209. }
  1210. }
  1211. if (dst_q_index < src_cnt) {
  1212. i = dst_q_index;
  1213. req = sba_prep_dma_pq_single_req(sba,
  1214. off, dst_p, dst_q, src[i], scf[i],
  1215. req_len, flags | DMA_PREP_FENCE);
  1216. if (!req)
  1217. goto fail;
  1218. if (first)
  1219. sba_chain_request(first, req);
  1220. else
  1221. first = req;
  1222. flags |= DMA_PREP_CONTINUE;
  1223. }
  1224. for (i = 0; i < src_cnt; i++) {
  1225. if (dst_q_index == i)
  1226. continue;
  1227. req = sba_prep_dma_pq_single_req(sba,
  1228. off, dst_p, dst_q, src[i], scf[i],
  1229. req_len, flags | DMA_PREP_FENCE);
  1230. if (!req)
  1231. goto fail;
  1232. if (first)
  1233. sba_chain_request(first, req);
  1234. else
  1235. first = req;
  1236. flags |= DMA_PREP_CONTINUE;
  1237. }
  1238. } else {
  1239. req = sba_prep_dma_pq_req(sba, off,
  1240. dst_p, dst_q, src, src_cnt,
  1241. scf, req_len, flags);
  1242. if (!req)
  1243. goto fail;
  1244. if (first)
  1245. sba_chain_request(first, req);
  1246. else
  1247. first = req;
  1248. }
  1249. off += req_len;
  1250. len -= req_len;
  1251. }
  1252. return (first) ? &first->tx : NULL;
  1253. fail:
  1254. if (first)
  1255. sba_free_chained_requests(first);
  1256. return NULL;
  1257. }
  1258. /* ====== Mailbox callbacks ===== */
  1259. static void sba_receive_message(struct mbox_client *cl, void *msg)
  1260. {
  1261. struct brcm_message *m = msg;
  1262. struct sba_request *req = m->ctx;
  1263. struct sba_device *sba = req->sba;
  1264. /* Report an error if the message failed */
  1265. if (m->error < 0)
  1266. dev_err(sba->dev, "%s got message with error %d",
  1267. dma_chan_name(&sba->dma_chan), m->error);
  1268. /* Process received request */
  1269. sba_process_received_request(sba, req);
  1270. }
  1271. /* ====== Debugfs callbacks ====== */
  1272. static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
  1273. {
  1274. struct platform_device *pdev = to_platform_device(file->private);
  1275. struct sba_device *sba = platform_get_drvdata(pdev);
  1276. /* Write stats in file */
  1277. sba_write_stats_in_seqfile(sba, file);
  1278. return 0;
  1279. }
  1280. /* ====== Platform driver routines ===== */
  1281. static int sba_prealloc_channel_resources(struct sba_device *sba)
  1282. {
  1283. int i, j, ret = 0;
  1284. struct sba_request *req = NULL;
  1285. sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
  1286. sba->max_resp_pool_size,
  1287. &sba->resp_dma_base, GFP_KERNEL);
  1288. if (!sba->resp_base)
  1289. return -ENOMEM;
  1290. sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
  1291. sba->max_cmds_pool_size,
  1292. &sba->cmds_dma_base, GFP_KERNEL);
  1293. if (!sba->cmds_base) {
  1294. ret = -ENOMEM;
  1295. goto fail_free_resp_pool;
  1296. }
  1297. spin_lock_init(&sba->reqs_lock);
  1298. sba->reqs_fence = false;
  1299. INIT_LIST_HEAD(&sba->reqs_alloc_list);
  1300. INIT_LIST_HEAD(&sba->reqs_pending_list);
  1301. INIT_LIST_HEAD(&sba->reqs_active_list);
  1302. INIT_LIST_HEAD(&sba->reqs_aborted_list);
  1303. INIT_LIST_HEAD(&sba->reqs_free_list);
  1304. for (i = 0; i < sba->max_req; i++) {
  1305. req = devm_kzalloc(sba->dev,
  1306. struct_size(req, cmds, sba->max_cmd_per_req),
  1307. GFP_KERNEL);
  1308. if (!req) {
  1309. ret = -ENOMEM;
  1310. goto fail_free_cmds_pool;
  1311. }
  1312. INIT_LIST_HEAD(&req->node);
  1313. req->sba = sba;
  1314. req->flags = SBA_REQUEST_STATE_FREE;
  1315. INIT_LIST_HEAD(&req->next);
  1316. atomic_set(&req->next_pending_count, 0);
  1317. for (j = 0; j < sba->max_cmd_per_req; j++) {
  1318. req->cmds[j].cmd = 0;
  1319. req->cmds[j].cmd_dma = sba->cmds_base +
  1320. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1321. req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
  1322. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1323. req->cmds[j].flags = 0;
  1324. }
  1325. memset(&req->msg, 0, sizeof(req->msg));
  1326. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  1327. async_tx_ack(&req->tx);
  1328. req->tx.tx_submit = sba_tx_submit;
  1329. req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
  1330. list_add_tail(&req->node, &sba->reqs_free_list);
  1331. }
  1332. return 0;
  1333. fail_free_cmds_pool:
  1334. dma_free_coherent(sba->mbox_dev,
  1335. sba->max_cmds_pool_size,
  1336. sba->cmds_base, sba->cmds_dma_base);
  1337. fail_free_resp_pool:
  1338. dma_free_coherent(sba->mbox_dev,
  1339. sba->max_resp_pool_size,
  1340. sba->resp_base, sba->resp_dma_base);
  1341. return ret;
  1342. }
  1343. static void sba_freeup_channel_resources(struct sba_device *sba)
  1344. {
  1345. dmaengine_terminate_all(&sba->dma_chan);
  1346. dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
  1347. sba->cmds_base, sba->cmds_dma_base);
  1348. dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
  1349. sba->resp_base, sba->resp_dma_base);
  1350. sba->resp_base = NULL;
  1351. sba->resp_dma_base = 0;
  1352. }
  1353. static int sba_async_register(struct sba_device *sba)
  1354. {
  1355. int ret;
  1356. struct dma_device *dma_dev = &sba->dma_dev;
  1357. /* Initialize DMA channel cookie */
  1358. sba->dma_chan.device = dma_dev;
  1359. dma_cookie_init(&sba->dma_chan);
  1360. /* Initialize DMA device capability mask */
  1361. dma_cap_zero(dma_dev->cap_mask);
  1362. dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
  1363. dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
  1364. dma_cap_set(DMA_XOR, dma_dev->cap_mask);
  1365. dma_cap_set(DMA_PQ, dma_dev->cap_mask);
  1366. /*
  1367. * Set mailbox channel device as the base device of
  1368. * our dma_device because the actual memory accesses
  1369. * will be done by mailbox controller
  1370. */
  1371. dma_dev->dev = sba->mbox_dev;
  1372. /* Set base prep routines */
  1373. dma_dev->device_free_chan_resources = sba_free_chan_resources;
  1374. dma_dev->device_terminate_all = sba_device_terminate_all;
  1375. dma_dev->device_issue_pending = sba_issue_pending;
  1376. dma_dev->device_tx_status = sba_tx_status;
  1377. /* Set interrupt routine */
  1378. if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
  1379. dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
  1380. /* Set memcpy routine */
  1381. if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
  1382. dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
  1383. /* Set xor routine and capability */
  1384. if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
  1385. dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
  1386. dma_dev->max_xor = sba->max_xor_srcs;
  1387. }
  1388. /* Set pq routine and capability */
  1389. if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
  1390. dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
  1391. dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
  1392. }
  1393. /* Initialize DMA device channel list */
  1394. INIT_LIST_HEAD(&dma_dev->channels);
  1395. list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
  1396. /* Register with Linux async DMA framework */
  1397. ret = dma_async_device_register(dma_dev);
  1398. if (ret) {
  1399. dev_err(sba->dev, "async device register error %d", ret);
  1400. return ret;
  1401. }
  1402. dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
  1403. dma_chan_name(&sba->dma_chan),
  1404. dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
  1405. dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
  1406. dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
  1407. dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
  1408. return 0;
  1409. }
  1410. static int sba_probe(struct platform_device *pdev)
  1411. {
  1412. int ret = 0;
  1413. struct sba_device *sba;
  1414. struct platform_device *mbox_pdev;
  1415. struct of_phandle_args args;
  1416. /* Allocate main SBA struct */
  1417. sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
  1418. if (!sba)
  1419. return -ENOMEM;
  1420. sba->dev = &pdev->dev;
  1421. platform_set_drvdata(pdev, sba);
  1422. /* Number of mailbox channels should be at least 1 */
  1423. ret = of_count_phandle_with_args(pdev->dev.of_node,
  1424. "mboxes", "#mbox-cells");
  1425. if (ret <= 0)
  1426. return -ENODEV;
  1427. /* Determine SBA version from DT compatible string */
  1428. if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
  1429. sba->ver = SBA_VER_1;
  1430. else if (of_device_is_compatible(sba->dev->of_node,
  1431. "brcm,iproc-sba-v2"))
  1432. sba->ver = SBA_VER_2;
  1433. else
  1434. return -ENODEV;
  1435. /* Derived Configuration parameters */
  1436. switch (sba->ver) {
  1437. case SBA_VER_1:
  1438. sba->hw_buf_size = 4096;
  1439. sba->hw_resp_size = 8;
  1440. sba->max_pq_coefs = 6;
  1441. sba->max_pq_srcs = 6;
  1442. break;
  1443. case SBA_VER_2:
  1444. sba->hw_buf_size = 4096;
  1445. sba->hw_resp_size = 8;
  1446. sba->max_pq_coefs = 30;
  1447. /*
  1448. * max_pq_srcs is kept smaller than max_pq_coefs because
  1449. * we are limited by the number of SBA commands that we can
  1450. * fit in one message for the underlying ring manager HW.
  1451. */
  1452. sba->max_pq_srcs = 12;
  1453. break;
  1454. default:
  1455. return -EINVAL;
  1456. }
  1457. sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
  1458. sba->max_cmd_per_req = sba->max_pq_srcs + 3;
  1459. sba->max_xor_srcs = sba->max_cmd_per_req - 1;
  1460. sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
  1461. sba->max_cmds_pool_size = sba->max_req *
  1462. sba->max_cmd_per_req * sizeof(u64);
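/*
 * For SBA_VER_2 this works out to max_cmd_per_req = 15,
 * max_xor_srcs = 14, a 64 KB response pool and a 960 KB
 * command pool per mailbox channel.
 */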
  1463. /* Setup mailbox client */
  1464. sba->client.dev = &pdev->dev;
  1465. sba->client.rx_callback = sba_receive_message;
  1466. sba->client.tx_block = false;
  1467. sba->client.knows_txdone = true;
  1468. sba->client.tx_tout = 0;
  1469. /* Request mailbox channel */
  1470. sba->mchan = mbox_request_channel(&sba->client, 0);
  1471. if (IS_ERR(sba->mchan)) {
  1472. ret = PTR_ERR(sba->mchan);
  1473. return ret;
  1474. }
  1475. /* Find out the underlying mailbox device */
  1476. ret = of_parse_phandle_with_args(pdev->dev.of_node,
  1477. "mboxes", "#mbox-cells", 0, &args);
  1478. if (ret)
  1479. goto fail_free_mchan;
  1480. mbox_pdev = of_find_device_by_node(args.np);
  1481. of_node_put(args.np);
  1482. if (!mbox_pdev) {
  1483. ret = -ENODEV;
  1484. goto fail_free_mchan;
  1485. }
  1486. sba->mbox_dev = &mbox_pdev->dev;
  1487. /* Preallocate channel resources */
  1488. ret = sba_prealloc_channel_resources(sba);
  1489. if (ret)
  1490. goto fail_free_mchan;
  1491. /* Check availability of debugfs */
  1492. if (!debugfs_initialized())
  1493. goto skip_debugfs;
  1494. /* Create debugfs root entry */
  1495. sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
  1496. if (IS_ERR_OR_NULL(sba->root)) {
  1497. dev_err(sba->dev, "failed to create debugfs root entry\n");
  1498. sba->root = NULL;
  1499. goto skip_debugfs;
  1500. }
  1501. /* Create debugfs stats entry */
  1502. sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
  1503. sba_debugfs_stats_show);
  1504. if (IS_ERR_OR_NULL(sba->stats))
  1505. dev_err(sba->dev, "failed to create debugfs stats file\n");
  1506. skip_debugfs:
  1507. /* Register DMA device with Linux async framework */
  1508. ret = sba_async_register(sba);
  1509. if (ret)
  1510. goto fail_free_resources;
  1511. /* Print device info */
  1512. dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
  1513. dma_chan_name(&sba->dma_chan), sba->ver+1,
  1514. dev_name(sba->mbox_dev));
  1515. return 0;
  1516. fail_free_resources:
  1517. debugfs_remove_recursive(sba->root);
  1518. sba_freeup_channel_resources(sba);
  1519. fail_free_mchan:
  1520. mbox_free_channel(sba->mchan);
  1521. return ret;
  1522. }
  1523. static int sba_remove(struct platform_device *pdev)
  1524. {
  1525. struct sba_device *sba = platform_get_drvdata(pdev);
  1526. dma_async_device_unregister(&sba->dma_dev);
  1527. debugfs_remove_recursive(sba->root);
  1528. sba_freeup_channel_resources(sba);
  1529. mbox_free_channel(sba->mchan);
  1530. return 0;
  1531. }
  1532. static const struct of_device_id sba_of_match[] = {
  1533. { .compatible = "brcm,iproc-sba", },
  1534. { .compatible = "brcm,iproc-sba-v2", },
  1535. {},
  1536. };
  1537. MODULE_DEVICE_TABLE(of, sba_of_match);
  1538. static struct platform_driver sba_driver = {
  1539. .probe = sba_probe,
  1540. .remove = sba_remove,
  1541. .driver = {
  1542. .name = "bcm-sba-raid",
  1543. .of_match_table = sba_of_match,
  1544. },
  1545. };
  1546. module_platform_driver(sba_driver);
  1547. MODULE_DESCRIPTION("Broadcom SBA RAID driver");
  1548. MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
  1549. MODULE_LICENSE("GPL v2");