qplib_rcfw.c

/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);
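
/*
 * Command flow, in brief: the control path builds a cmdq_base request,
 * __send_message() copies it into one or more 16-byte CMDQ slots and
 * rings the CMDQ doorbell, and firmware replies on the CREQ ring.  The
 * CREQ interrupt schedules the bnxt_qplib_service_creq() tasklet, which
 * matches each completion back to its sender through the cookie carried
 * in the request header.
 */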

/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}
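
/*
 * Blocking variant of the wait above: rather than sleeping on the wait
 * queue, poll the CREQ directly in 1-msec steps, bounded by
 * RCFW_BLOCKED_CMD_WAIT_COUNT.  This is the path used when the caller
 * cannot sleep.
 */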
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}
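
/*
 * Cookie bookkeeping in __send_message(), as a worked example (the
 * concrete values assume the definitions in qplib_rcfw.h, where
 * RCFW_MAX_COOKIE_VALUE masks the low 15 bits and RCFW_CMD_IS_BLOCKING
 * is bit 15):
 *
 *	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
 *	cbit   = cookie % RCFW_MAX_OUTSTANDING_CMD;
 *
 * With seq_num == 0x1234a, cookie is 0x234a; if RCFW_MAX_OUTSTANDING_CMD
 * is 256, cbit is 0x4a.  That bit in cmdq_bitmap and the crsqe_tbl[cbit]
 * slot stay claimed until the matching CREQ completion arrives.
 * Blocking sends OR in RCFW_CMD_IS_BLOCKING so the completion path knows
 * not to wake the wait queue.
 */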
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The CREQ tasklet completes the request asynchronously
	 * via crsqe->resp
	 */
	return 0;
}
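
/*
 * Wrapper around __send_message() that retries transient failures:
 * -EAGAIN (CMDQ full) and -EBUSY (slot still owned by an earlier
 * command) are retried up to 0xFF times with a short delay; any other
 * send error is returned to the caller immediately.
 */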
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
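
/*
 * Typical caller pattern (an illustrative sketch; it mirrors
 * bnxt_qplib_deinit_rcfw() further below):
 *
 *	struct cmdq_deinitialize_fw req;
 *	struct creq_deinitialize_fw_resp resp;
 *	u16 cmd_flags = 0;
 *
 *	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
 *					  NULL, 0);
 *
 * Commands with large responses additionally pass a side buffer from
 * bnxt_qplib_rcfw_alloc_sbuf() as the sb argument.
 */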

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
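
/*
 * A creq_qp_event is either an asynchronous QP error notification or
 * the completion of an earlier CMDQ command; the switch below
 * separates the two by event type.
 */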
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 *
		 */
		spin_lock_irqsave_nested(&cmdq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}
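
/*
 * CREQ entries carry a valid bit whose expected polarity flips on each
 * wrap of the ring, so CREQ_CMP_VALID() (qplib_rcfw.h) detects new
 * entries without a hardware-maintained producer index.  The dma_rmb()
 * below keeps the CPU from reading the rest of an entry before its
 * valid bit has been observed.
 */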

/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: aeqe:%#x Not handled", type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: creqe with op_event = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
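
/*
 * Hard-IRQ half of CREQ handling: prefetch the entry the tasklet is
 * about to inspect, then defer all real work to
 * bnxt_qplib_service_creq() above.
 */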
static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}
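
/*
 * Translate a PBL page size into its INITIALIZE_FW encoding.  The
 * QPC_PG_SIZE constants are reused for every context table here, which
 * assumes the page-size codes are common across the QPC/MRW/SRQ/CQ/
 * TIM/TQM fields of cmdq_initialize_fw; 4K is the fallback for an
 * unrecognized size.
 */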
static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
		pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
		pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
		pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
		pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
		pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
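
/*
 * Each context table is described to firmware as a packed level +
 * page-size byte.  For example, a two-level QPC table built from 4K
 * pages would be programmed as
 *
 *	(PBL_LVL_2 << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
 *	CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K
 *
 * with the level-0 page-directory DMA address in qpc_page_dir.
 */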
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * VFs need not set up the HW context area; the PF
	 * sets up this area on their behalf. Skip the
	 * HW programming in that case.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;
	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}
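
/*
 * Teardown mirror of bnxt_qplib_alloc_rcfw_channel() below.  Note that
 * the allocator's failure path also lands here, so everything freed in
 * this function must be safe to release when only partially set up.
 */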
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  int qp_tbl_sz)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
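
/*
 * Quiesce order matters here: disable the tasklet so a late interrupt
 * cannot re-run completion processing, write the CREQ doorbell without
 * the arm bit to mask further interrupts, then synchronize_irq()
 * before the vector is finally freed.
 */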
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	tasklet_disable(&rcfw->worker);
	/* Mask h/w interrupts */
	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		rcfw->creq.max_elements);
	/* Sync with last running IRQ-handler */
	synchronize_irq(rcfw->vector);
	if (kill)
		tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(rcfw->cmdq_bar_reg_iomem);
	iounmap(rcfw->creq_bar_reg_iomem);

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->cmdq_bar_reg_iomem = NULL;
	rcfw->creq_bar_reg_iomem = NULL;
	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	int rc;

	if (rcfw->requested)
		return -EFAULT;

	rcfw->vector = msix_vector;
	if (need_init)
		tasklet_init(&rcfw->worker,
			     bnxt_qplib_service_creq, (unsigned long)rcfw);
	else
		tasklet_enable(&rcfw->worker);
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	rcfw->requested = true;
	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		      rcfw->creq.max_elements);

	return 0;
}
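
/*
 * Bring-up path: map the CMDQ mailbox and CREQ doorbell BAR regions,
 * hook up the MSI-X vector, and finally hand the CMDQ page table to
 * firmware through the cmdq_init mailbox write at the end.
 */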
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
				  sizeof(unsigned long));
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;
	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		iounmap(rcfw->cmdq_bar_reg_iomem);
		rcfw->cmdq_bar_reg_iomem = NULL;
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;
	init_waitqueue_head(&rcfw->waitq);

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}
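
/*
 * Side buffers carry command responses that do not fit in a 16-byte
 * CREQ entry (e.g. QUERY_FUNC output); __send_message() points the
 * request's resp_addr at the buffer so firmware can DMA into it
 * directly.  GFP_ATOMIC is used because callers may hold locks.
 */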
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
		struct bnxt_qplib_rcfw *rcfw,
		u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}