hns_roce_mr.c

/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

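/*
 * An MR key is derived from its MTPT index by rotating the 32-bit index
 * left by eight bits; key_to_hw_index() below is the inverse rotation.
 */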
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

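/*
 * SW2HW_MPT hands a prepared MPT context to hardware through a mailbox;
 * HW2SW_MPT takes ownership back.  For HW2SW_MPT the mailbox is optional
 * and may be NULL when the returned context is not needed.
 */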
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

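/*
 * Allocate a naturally aligned run of 2^order MTT segments from the buddy
 * allocator: find the smallest free block of at least the requested order
 * and split it down.  Returns 0 and sets *seg to the first segment index,
 * or -1 if nothing suitable is free.
 */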
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

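/*
 * Return a 2^order block to the buddy allocator, coalescing with its buddy
 * at each order while that buddy is also free.
 */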
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

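/*
 * Reserve 2^order MTT segments from the buddy that matches mtt_type (WQE
 * or CQE) and make sure the backing HEM range is mapped.
 */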
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	if (mtt_type == MTT_TYPE_WQE) {
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
	} else {
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* A page count of zero corresponds to a DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: a page_shift of zero means a fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute the order (number of segment doublings) needed for npages */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate the MTT segments */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	if (mtt->mtt_type == MTT_TYPE_WQE) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					mtt->first_seg,
					mtt->first_seg + (1 << mtt->order) - 1);
	} else {
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					mtt->first_seg,
					mtt->first_seg + (1 << mtt->order) - 1);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

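/*
 * Error-path helper for hns_roce_mhop_alloc(): free the L1/L2 base address
 * tables that were successfully allocated before the failure at
 * (loop_i, loop_j) in level err_loop_index, then release the L0 table.
 */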
static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u64 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "not supported: mhop_num=%d, err_loop_index=%d.",
			 mhop_num, err_loop_index);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}

/* PBL multi-hop addressing: build a one-, two- or three-level tree of
 * base address tables (BTs) to hold the MR's page buffer list, depending
 * on pbl_hop_num.
 */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	int i = 0, j = 0;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;

	mhop_num = hr_dev->caps.pbl_hop_num;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		if (npages > pbl_bt_sz / 8) {
			dev_err(dev, "npages %d is larger than buf_pg_sz!",
				npages);
			return -EINVAL;
		}
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;

		mr->pbl_size = npages;
		mr->pbl_ba = mr->pbl_dma_addr;
		mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
		return 0;
	}

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	if (mhop_num == 3) {
		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
					      sizeof(*mr->pbl_l2_dma_addr),
					      GFP_KERNEL);
		if (!mr->pbl_l2_dma_addr)
			goto err_kcalloc_l2_dma;

		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
					sizeof(*mr->pbl_bt_l2),
					GFP_KERNEL);
		if (!mr->pbl_bt_l2)
			goto err_kcalloc_bt_l2;
	}

	/* alloc L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 2) {
		/* alloc L1 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				npages_allocated = i * (pbl_bt_sz / 8);
				size = (npages - npages_allocated) * 8;
			}
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
						&(mr->pbl_l1_dma_addr[i]),
						GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num)
				break;
		}
	} else if (mhop_num == 3) {
		/* alloc L1, L2 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
						&(mr->pbl_l1_dma_addr[i]),
						GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;

				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
					size = pbl_bt_sz;
				} else {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);
					size = (npages - npages_allocated) * 8;
				}
				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
					      dev, size,
					      &(mr->pbl_l2_dma_addr[bt_idx]),
					      GFP_KERNEL);
				if (!mr->pbl_bt_l2[bt_idx]) {
					hns_roce_loop_free(hr_dev, mr, 2, i, j);
					goto err_dma_alloc_l0;
				}

				*(mr->pbl_bt_l1[i] + j) =
						mr->pbl_l2_dma_addr[bt_idx];

				pbl_bt_cnt++;
				if (pbl_bt_cnt >= pbl_last_bt_num) {
					mr_alloc_done = 1;
					break;
				}
			}

			if (mr_alloc_done)
				break;
		}
	}

	mr->l0_chunk_last_num = i + 1;
	if (mhop_num == 3)
		mr->l1_chunk_last_num = j + 1;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}

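/*
 * Allocate an MTPT index (which determines the MR key) and, unless this is
 * a full-address-space DMA MR (size == ~0ULL), the PBL that will hold the
 * MR's page addresses.
 */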
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret = 0;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD num the MR belongs to */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ull) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
		/* PBL multi-hop addressing parameters */
		mr->pbl_bt_l2 = NULL;
		mr->pbl_bt_l1 = NULL;
		mr->pbl_bt_l0 = NULL;
		mr->pbl_l2_dma_addr = NULL;
		mr->pbl_l1_dma_addr = NULL;
		mr->pbl_l0_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf)
				return -ENOMEM;
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		}
	}

	return ret;
}

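/*
 * Free the multi-hop PBL base address tables built by
 * hns_roce_mhop_alloc(); the last L1/L2 table may be shorter than
 * pbl_bt_sz, so its size is recomputed from the page count.
 */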
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = ib_umem_page_count(mr->umem);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * 8),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				npages_allocated = i * (pbl_bt_sz / 8);

				dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l1[i],
					      mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * (pbl_bt_sz / 8) + j;

				if ((i == mr->l0_chunk_last_num - 1)
				    && j == mr->l1_chunk_last_num - 1) {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);

					dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l2[bt_idx],
					      mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;

	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}

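/*
 * Destroy an MR: pull the MPT back to software if it was enabled, free the
 * PBL (unless this is a DMA MR), release the HEM entry and return the MTPT
 * index to the bitmap.
 */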
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev, (unsigned int)(npages * 8),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	if (mr->enabled)
		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

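/*
 * Make an MR visible to hardware: map its HEM entry, build the MPT context
 * in a mailbox via write_mtpt() and issue SW2HW_MPT.
 */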
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

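/*
 * Write npages page addresses into consecutive MTT entries.  The chunk
 * must not straddle a BT page and must start on a segment boundary; the
 * checks below enforce this.
 */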
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 bt_page_size;
	u32 i;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	/* All MTTs must fit in the same page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		table = &hr_dev->mr_table.mtt_table;
	else
		table = &hr_dev->mr_table.mtt_cqe_table;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg +
				   start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

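/* Write the page addresses of a kernel-allocated hns_roce_buf into an MTT. */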
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}
	return 0;

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

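/* Create a DMA MR covering the whole address space (size == ~0ULL), with no PBL. */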
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

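/*
 * Walk a umem's scatterlist and write its page addresses into the MTT in
 * batches of one BT page worth of entries at a time.
 */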
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct scatterlist *sg;
	unsigned int order;
	int i, k, entry;
	int npage = 0;
	int ret = 0;
	int len;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
		hr_dev->caps.cqe_ba_pg_sz;
	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr =
				sg_dma_address(sg) + (k << umem->page_shift);
			if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
				if (page_addr & ((1 << mtt->page_shift) - 1)) {
					dev_err(dev, "page_addr 0x%llx is not aligned to page_shift %d!\n",
						page_addr, mtt->page_shift);
					ret = -EINVAL;
					goto out;
				}
				pages[i++] = page_addr;
			}
			npage++;
			if (i == bt_page_size / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}

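/*
 * Fill the MR's PBL with the umem's page addresses.  Depending on
 * pbl_hop_num the destination is the flat pbl_buf (hop_num <= 1) or the
 * L1/L2 base address tables (hop_num 2 or 3).
 */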
static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i = 0, j = 0, k;
	int entry;
	int len;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr = sg_dma_address(sg) +
				    (k << umem->page_shift);

			if (!hr_dev->caps.pbl_hop_num) {
				mr->pbl_buf[i++] = page_addr >> 12;
			} else if (hr_dev->caps.pbl_hop_num == 1) {
				mr->pbl_buf[i++] = page_addr;
			} else {
				if (hr_dev->caps.pbl_hop_num == 2)
					mr->pbl_bt_l1[i][j] = page_addr;
				else if (hr_dev->caps.pbl_hop_num == 3)
					mr->pbl_bt_l2[i][j] = page_addr;

				j++;
				if (j >= (pbl_bt_sz / 8)) {
					i++;
					j = 0;
				}
			}
		}
	}

	/* Memory barrier */
	mb();

	return 0;
}

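/*
 * Register a user MR: pin the user buffer, validate the page count against
 * the PBL capacity, then allocate, fill and enable the MR.
 */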
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				"MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		u64 pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				"MR len %lld err. MR page num is limited to %lld!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

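/*
 * Re-register a user MR: the MPT is pulled back to software, the PD,
 * translation (umem and PBL) and/or access flags are updated as requested
 * in 'flags', and the MPT is handed back to hardware via SW2HW_MPT.
 */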
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int npages;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
						  mr->pbl_dma_addr);
		}
		ib_umem_release(mr->umem);

		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
				       mr_access_flags, 0);
		if (IS_ERR(mr->umem)) {
			ret = PTR_ERR(mr->umem);
			mr->umem = NULL;
			goto free_cmd_mbox;
		}
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num) {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto release_umem;
		} else {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto release_umem;
			}
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		if (flags & IB_MR_REREG_TRANS)
			goto release_umem;
		else
			goto free_cmd_mbox;
	}

	if (flags & IB_MR_REREG_TRANS) {
		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
		if (ret) {
			if (mr->size != ~0ULL) {
				npages = ib_umem_page_count(mr->umem);

				if (hr_dev->caps.pbl_hop_num)
					hns_roce_mhop_free(hr_dev, mr);
				else
					dma_free_coherent(dev, npages * 8,
							  mr->pbl_buf,
							  mr->pbl_dma_addr);
			}

			goto release_umem;
		}
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto release_umem;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

release_umem:
	ib_umem_release(mr->umem);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}