/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the indices of the RQs
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

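/*
 * Ask the forwarding device to bring up every RQ backing this QP group.
 * On failure, roll back by disabling any queues that were already
 * enabled before returning the error.
 */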
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

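/*
 * Counterpart to enable_qp_grp(): disable every RQ backing this QP
 * group. Errors are logged but do not stop the loop; the status of the
 * last disable attempt is returned.
 */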
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

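/*
 * Point a filter action at this QP group's default RQ so that packets
 * matching the filter are steered to it.
 */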
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

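/*
 * Build a flow for the custom RoCE transport: reserve the requested
 * port (or the next free one if none was specified), then install a
 * usNIC filter steering traffic for that port to the group's default RQ.
 */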
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

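/*
 * Build a flow for the UDP transport: take a reference on the caller's
 * socket, verify it really is a UDP socket, and install a filter
 * matching the socket's bound address and port.
 */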
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

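/*
 * Dispatch on transport type, then link the new flow into the group's
 * list and expose it in debugfs.
 */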
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

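/*
 * Drive the QP group through the usual IB QP state machine. Only the
 * transitions handled below are supported; "data" optionally carries a
 * usnic_transport_spec when moving into INIT so a flow can be added.
 */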
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s\n",
			qp_grp->grp_id,
			usnic_ib_qp_grp_state_to_string(old_state),
			usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s\n",
			qp_grp->grp_id,
			usnic_ib_qp_grp_state_to_string(old_state),
			usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

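/*
 * Walk the EOL-terminated resource spec and grab a chunk of each listed
 * resource type from the vNIC. The returned array is NULL-terminated;
 * on failure every chunk acquired so far is released.
 */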
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

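/*
 * Bind the QP group to a VF. The first group on a VF attaches the VF's
 * PCI device to the PD's IOMMU domain; later groups just take another
 * reference and must use the same PD.
 */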
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

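/*
 * Derive the group id (and hence the QP number) from the flow's port:
 * the reserved port for custom RoCE, or the bound UDP port for the UDP
 * transport.
 */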
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

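/*
 * Allocate a QP group: check the resource spec against the transport's
 * minimum, grab vNIC resources, bind the group to the VF and PD, and
 * install the initial flow, whose port becomes the QP number.
 */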
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp)
		return NULL;

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

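/*
 * Tear down a QP group; the caller must already have modified it back
 * to the RESET state.
 */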
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

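/*
 * Return the group's resource chunk of the given type, if it has one.
 */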
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}