devx.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127
  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
  4. */
  5. #include <rdma/ib_user_verbs.h>
  6. #include <rdma/ib_verbs.h>
  7. #include <rdma/uverbs_types.h>
  8. #include <rdma/uverbs_ioctl.h>
  9. #include <rdma/mlx5_user_ioctl_cmds.h>
  10. #include <rdma/ib_umem.h>
  11. #include <linux/mlx5/driver.h>
  12. #include <linux/mlx5/fs.h>
  13. #include "mlx5_ib.h"
  14. #define UVERBS_MODULE_NAME mlx5_ib
  15. #include <rdma/uverbs_named_ioctl.h>
  16. #define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
/*
 * Kernel-side state for an object created through the DEVX object-create
 * method. The destroy mailbox (dinbox/dinlen) is prebuilt at creation time
 * by devx_obj_build_destroy_cmd() so the object can be torn down on uobject
 * cleanup without reparsing the original create command.
 */
struct devx_obj {
	struct mlx5_core_dev *mdev;
	u32 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};
/*
 * Kernel-side state for a user memory region registered through DEVX.
 * dinbox/dinlen hold the prebuilt destroy command, mirroring struct devx_obj.
 */
struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;	/* offset of the start within the first page */
	int page_shift;
	int ncont;		/* number of contiguous blocks in the umem */
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};
/*
 * Scratch holder for a umem registration firmware command: the (variable
 * length) input mailbox and a fixed-size general-object output header.
 */
struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};
  37. static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
  38. {
  39. return to_mucontext(ib_uverbs_get_ucontext(file));
  40. }
  41. int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
  42. {
  43. u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
  44. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  45. u64 general_obj_types;
  46. void *hdr;
  47. int err;
  48. hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
  49. general_obj_types = MLX5_CAP_GEN_64(dev->mdev, general_obj_types);
  50. if (!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) ||
  51. !(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
  52. return -EINVAL;
  53. if (!capable(CAP_NET_RAW))
  54. return -EPERM;
  55. MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
  56. MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
  57. err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  58. if (err)
  59. return err;
  60. context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
  61. return 0;
  62. }
  63. void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
  64. struct mlx5_ib_ucontext *context)
  65. {
  66. u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
  67. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
  68. MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
  69. MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
  70. MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
  71. mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  72. }
  73. bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
  74. {
  75. struct devx_obj *devx_obj = obj;
  76. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
  77. switch (opcode) {
  78. case MLX5_CMD_OP_DESTROY_TIR:
  79. *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
  80. *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
  81. obj_id);
  82. return true;
  83. case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
  84. *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
  85. *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
  86. table_id);
  87. return true;
  88. default:
  89. return false;
  90. }
  91. }
  92. static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
  93. {
  94. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  95. u32 obj_id;
  96. switch (opcode) {
  97. case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
  98. case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
  99. obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
  100. break;
  101. case MLX5_CMD_OP_QUERY_MKEY:
  102. obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
  103. break;
  104. case MLX5_CMD_OP_QUERY_CQ:
  105. obj_id = MLX5_GET(query_cq_in, in, cqn);
  106. break;
  107. case MLX5_CMD_OP_MODIFY_CQ:
  108. obj_id = MLX5_GET(modify_cq_in, in, cqn);
  109. break;
  110. case MLX5_CMD_OP_QUERY_SQ:
  111. obj_id = MLX5_GET(query_sq_in, in, sqn);
  112. break;
  113. case MLX5_CMD_OP_MODIFY_SQ:
  114. obj_id = MLX5_GET(modify_sq_in, in, sqn);
  115. break;
  116. case MLX5_CMD_OP_QUERY_RQ:
  117. obj_id = MLX5_GET(query_rq_in, in, rqn);
  118. break;
  119. case MLX5_CMD_OP_MODIFY_RQ:
  120. obj_id = MLX5_GET(modify_rq_in, in, rqn);
  121. break;
  122. case MLX5_CMD_OP_QUERY_RMP:
  123. obj_id = MLX5_GET(query_rmp_in, in, rmpn);
  124. break;
  125. case MLX5_CMD_OP_MODIFY_RMP:
  126. obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
  127. break;
  128. case MLX5_CMD_OP_QUERY_RQT:
  129. obj_id = MLX5_GET(query_rqt_in, in, rqtn);
  130. break;
  131. case MLX5_CMD_OP_MODIFY_RQT:
  132. obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
  133. break;
  134. case MLX5_CMD_OP_QUERY_TIR:
  135. obj_id = MLX5_GET(query_tir_in, in, tirn);
  136. break;
  137. case MLX5_CMD_OP_MODIFY_TIR:
  138. obj_id = MLX5_GET(modify_tir_in, in, tirn);
  139. break;
  140. case MLX5_CMD_OP_QUERY_TIS:
  141. obj_id = MLX5_GET(query_tis_in, in, tisn);
  142. break;
  143. case MLX5_CMD_OP_MODIFY_TIS:
  144. obj_id = MLX5_GET(modify_tis_in, in, tisn);
  145. break;
  146. case MLX5_CMD_OP_QUERY_FLOW_TABLE:
  147. obj_id = MLX5_GET(query_flow_table_in, in, table_id);
  148. break;
  149. case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
  150. obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
  151. break;
  152. case MLX5_CMD_OP_QUERY_FLOW_GROUP:
  153. obj_id = MLX5_GET(query_flow_group_in, in, group_id);
  154. break;
  155. case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
  156. obj_id = MLX5_GET(query_fte_in, in, flow_index);
  157. break;
  158. case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
  159. obj_id = MLX5_GET(set_fte_in, in, flow_index);
  160. break;
  161. case MLX5_CMD_OP_QUERY_Q_COUNTER:
  162. obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
  163. break;
  164. case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
  165. obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
  166. break;
  167. case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
  168. obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
  169. break;
  170. case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
  171. obj_id = MLX5_GET(query_scheduling_element_in, in,
  172. scheduling_element_id);
  173. break;
  174. case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
  175. obj_id = MLX5_GET(modify_scheduling_element_in, in,
  176. scheduling_element_id);
  177. break;
  178. case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
  179. obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
  180. break;
  181. case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
  182. obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
  183. break;
  184. case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
  185. obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
  186. break;
  187. case MLX5_CMD_OP_QUERY_QP:
  188. obj_id = MLX5_GET(query_qp_in, in, qpn);
  189. break;
  190. case MLX5_CMD_OP_RST2INIT_QP:
  191. obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
  192. break;
  193. case MLX5_CMD_OP_INIT2RTR_QP:
  194. obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
  195. break;
  196. case MLX5_CMD_OP_RTR2RTS_QP:
  197. obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
  198. break;
  199. case MLX5_CMD_OP_RTS2RTS_QP:
  200. obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
  201. break;
  202. case MLX5_CMD_OP_SQERR2RTS_QP:
  203. obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
  204. break;
  205. case MLX5_CMD_OP_2ERR_QP:
  206. obj_id = MLX5_GET(qp_2err_in, in, qpn);
  207. break;
  208. case MLX5_CMD_OP_2RST_QP:
  209. obj_id = MLX5_GET(qp_2rst_in, in, qpn);
  210. break;
  211. case MLX5_CMD_OP_QUERY_DCT:
  212. obj_id = MLX5_GET(query_dct_in, in, dctn);
  213. break;
  214. case MLX5_CMD_OP_QUERY_XRQ:
  215. obj_id = MLX5_GET(query_xrq_in, in, xrqn);
  216. break;
  217. case MLX5_CMD_OP_QUERY_XRC_SRQ:
  218. obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
  219. break;
  220. case MLX5_CMD_OP_ARM_XRC_SRQ:
  221. obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
  222. break;
  223. case MLX5_CMD_OP_QUERY_SRQ:
  224. obj_id = MLX5_GET(query_srq_in, in, srqn);
  225. break;
  226. case MLX5_CMD_OP_ARM_RQ:
  227. obj_id = MLX5_GET(arm_rq_in, in, srq_number);
  228. break;
  229. case MLX5_CMD_OP_DRAIN_DCT:
  230. case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
  231. obj_id = MLX5_GET(drain_dct_in, in, dctn);
  232. break;
  233. case MLX5_CMD_OP_ARM_XRQ:
  234. obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
  235. break;
  236. default:
  237. return false;
  238. }
  239. if (obj_id == obj->obj_id)
  240. return true;
  241. return false;
  242. }
/*
 * Whitelist of opcodes that create a firmware object, i.e. that may be
 * issued through MLX5_IB_METHOD_DEVX_OBJ_CREATE. SET_FLOW_TABLE_ENTRY is
 * create only with op_mod == 0 (non-zero op_mod modifies an existing
 * entry and is accepted by devx_is_obj_modify_cmd() instead).
 */
static bool devx_is_obj_create_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	default:
		return false;
	}
}
/*
 * Whitelist of opcodes that modify an existing firmware object, i.e. that
 * may be issued through MLX5_IB_METHOD_DEVX_OBJ_MODIFY. SET_FLOW_TABLE_ENTRY
 * counts as modify only with op_mod == 1 (op_mod 0 is the create variant,
 * accepted by devx_is_obj_create_cmd()).
 */
static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}
/*
 * Whitelist of opcodes that query an existing firmware object, i.e. that
 * may be issued through MLX5_IB_METHOD_DEVX_OBJ_QUERY.
 */
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
		return true;
	default:
		return false;
	}
}
  358. static bool devx_is_general_cmd(void *in)
  359. {
  360. u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
  361. switch (opcode) {
  362. case MLX5_CMD_OP_QUERY_HCA_CAP:
  363. case MLX5_CMD_OP_QUERY_VPORT_STATE:
  364. case MLX5_CMD_OP_QUERY_ADAPTER:
  365. case MLX5_CMD_OP_QUERY_ISSI:
  366. case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
  367. case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
  368. case MLX5_CMD_OP_QUERY_VNIC_ENV:
  369. case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
  370. case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
  371. case MLX5_CMD_OP_NOP:
  372. case MLX5_CMD_OP_QUERY_CONG_STATUS:
  373. case MLX5_CMD_OP_QUERY_CONG_PARAMS:
  374. case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
  375. return true;
  376. default:
  377. return false;
  378. }
  379. }
  380. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
  381. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  382. {
  383. struct mlx5_ib_ucontext *c;
  384. struct mlx5_ib_dev *dev;
  385. int user_vector;
  386. int dev_eqn;
  387. unsigned int irqn;
  388. int err;
  389. if (uverbs_copy_from(&user_vector, attrs,
  390. MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
  391. return -EFAULT;
  392. c = devx_ufile2uctx(file);
  393. if (IS_ERR(c))
  394. return PTR_ERR(c);
  395. dev = to_mdev(c->ibucontext.device);
  396. err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
  397. if (err < 0)
  398. return err;
  399. if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
  400. &dev_eqn, sizeof(dev_eqn)))
  401. return -EFAULT;
  402. return 0;
  403. }
/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it into the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
  424. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
  425. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  426. {
  427. struct mlx5_ib_ucontext *c;
  428. struct mlx5_ib_dev *dev;
  429. u32 user_idx;
  430. s32 dev_idx;
  431. c = devx_ufile2uctx(file);
  432. if (IS_ERR(c))
  433. return PTR_ERR(c);
  434. dev = to_mdev(c->ibucontext.device);
  435. if (uverbs_copy_from(&user_idx, attrs,
  436. MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
  437. return -EFAULT;
  438. dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
  439. if (dev_idx < 0)
  440. return dev_idx;
  441. if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
  442. &dev_idx, sizeof(dev_idx)))
  443. return -EFAULT;
  444. return 0;
  445. }
  446. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
  447. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  448. {
  449. struct mlx5_ib_ucontext *c;
  450. struct mlx5_ib_dev *dev;
  451. void *cmd_in = uverbs_attr_get_alloced_ptr(
  452. attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
  453. int cmd_out_len = uverbs_attr_get_len(attrs,
  454. MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
  455. void *cmd_out;
  456. int err;
  457. c = devx_ufile2uctx(file);
  458. if (IS_ERR(c))
  459. return PTR_ERR(c);
  460. dev = to_mdev(c->ibucontext.device);
  461. if (!c->devx_uid)
  462. return -EPERM;
  463. /* Only white list of some general HCA commands are allowed for this method. */
  464. if (!devx_is_general_cmd(cmd_in))
  465. return -EINVAL;
  466. cmd_out = uverbs_zalloc(attrs, cmd_out_len);
  467. if (IS_ERR(cmd_out))
  468. return PTR_ERR(cmd_out);
  469. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  470. err = mlx5_cmd_exec(dev->mdev, cmd_in,
  471. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
  472. cmd_out, cmd_out_len);
  473. if (err)
  474. return err;
  475. return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
  476. cmd_out_len);
  477. }
/*
 * Prebuild the destroy/dealloc command for a just-created object.
 *
 * @in:     the create command input mailbox (already executed)
 * @out:    the create command output mailbox
 * @din:    destination buffer for the destroy command (zeroed by caller)
 * @dinlen: set to the destroy command's input length in bytes
 * @obj_id: set to the created object's id
 *
 * The generic prologue assumes the created id sits at the
 * general_obj_out_cmd_hdr.obj_id offset and that the destroy mailbox's id
 * field aliases general_obj_in_cmd_hdr.obj_id; cases where that does not
 * hold (TIR, flow table/group, FTE, scheduling element, VXLAN port, L2
 * table, MCG) override *obj_id, *dinlen and the id field explicitly.
 * NOTE(review): the aliasing assumption comes from the mlx5 IFC layout —
 * confirm against mlx5_ifc.h when adding new opcodes.
 */
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	/* Generic defaults; per-opcode cases below may override all three. */
	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		/* tirn is read explicitly from the create output */
		*obj_id = MLX5_GET(create_tir_out, out, tirn);
		MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		/* Destroy needs the vport/table-type tuple from the create input. */
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		/* As above, plus the owning table id. */
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		/* The entry's id (flow_index) comes from the create input. */
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		/* Undo is detach with the same qpn/gid pair. */
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		break;
	}
}
  646. static int devx_obj_cleanup(struct ib_uobject *uobject,
  647. enum rdma_remove_reason why)
  648. {
  649. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  650. struct devx_obj *obj = uobject->object;
  651. int ret;
  652. ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
  653. if (ib_is_destroy_retryable(ret, why, uobject))
  654. return ret;
  655. kfree(obj);
  656. return ret;
  657. }
/*
 * DEVX_OBJ_CREATE: execute a whitelisted create command, then prebuild the
 * matching destroy command into the new devx_obj so cleanup never has to
 * parse the create mailbox again.
 *
 * Error unwinding is order-sensitive: once the firmware create succeeded,
 * any later failure (copying the output back) must destroy the firmware
 * object before freeing obj, hence the obj_destroy/obj_free labels.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	int err;

	if (!c->devx_uid)
		return -EPERM;

	if (!devx_is_obj_create_cmd(cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Tag the command with the issuing context's devx uid. */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		goto obj_free;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
	/* dinbox is sized for the largest destroy command (delete_fte_in). */
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	return 0;

obj_destroy:
	/* Firmware object exists but userspace never saw it: tear it down. */
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
  702. static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
  703. struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
  704. {
  705. void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
  706. int cmd_out_len = uverbs_attr_get_len(attrs,
  707. MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
  708. struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
  709. MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
  710. struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
  711. struct devx_obj *obj = uobj->object;
  712. void *cmd_out;
  713. int err;
  714. if (!c->devx_uid)
  715. return -EPERM;
  716. if (!devx_is_obj_modify_cmd(cmd_in))
  717. return -EINVAL;
  718. if (!devx_is_valid_obj_id(obj, cmd_in))
  719. return -EINVAL;
  720. cmd_out = uverbs_zalloc(attrs, cmd_out_len);
  721. if (IS_ERR(cmd_out))
  722. return PTR_ERR(cmd_out);
  723. MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
  724. err = mlx5_cmd_exec(obj->mdev, cmd_in,
  725. uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
  726. cmd_out, cmd_out_len);
  727. if (err)
  728. return err;
  729. return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
  730. cmd_out, cmd_out_len);
  731. }
/*
 * Query an existing DEVX object by forwarding a user-supplied firmware
 * command. Mirrors the MODIFY handler: the opcode must be a recognized
 * query command and must address the object bound to the handle; the
 * firmware response is copied back to the user's CMD_OUT buffer.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct devx_obj *obj = uobj->object;
	void *cmd_out;
	int err;

	/* Only contexts that were granted a DEVX uid may issue commands. */
	if (!c->devx_uid)
		return -EPERM;

	/* Opcode must be a whitelisted query command... */
	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	/* ...and must target the object this handle refers to. */
	if (!devx_is_valid_obj_id(obj, cmd_in))
		return -EINVAL;

	/* Bundle-scoped allocation; released together with the attrs. */
	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	/* Stamp the caller's uid so firmware enforces ownership. */
	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
	err = mlx5_cmd_exec(obj->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
/*
 * devx_umem_get() - pin the user memory region described by the REG attrs.
 *
 * Reads the address/length/access-flags attributes, restricts access to
 * plain local/remote read-write, pins the pages with ib_umem_get() and
 * records the resulting page layout (page_shift, ncont, page_offset) in
 * @obj for the subsequent CREATE_UMEM command.
 *
 * Returns 0 on success or a negative errno. On failure nothing is left
 * pinned: the umem is released if the region resolves to zero pages.
 */
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int npages;
	int err;
	u32 page_mask;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	/* Only these three access flags may be requested by userspace. */
	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(access);
	if (err)
		return err;

	obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);

	/* Derive the largest page size the pinned region can be mapped with. */
	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont, NULL);

	if (!npages) {
		ib_umem_release(obj->umem);
		return -EINVAL;
	}

	/* Offset of the start address within the first (large) page. */
	page_mask = (1 << obj->page_shift) - 1;
	obj->page_offset = obj->umem->address & page_mask;

	return 0;
}
  799. static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
  800. struct devx_umem *obj,
  801. struct devx_umem_reg_cmd *cmd)
  802. {
  803. cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
  804. (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
  805. cmd->in = uverbs_zalloc(attrs, cmd->inlen);
  806. return PTR_ERR_OR_ZERO(cmd->in);
  807. }
/*
 * devx_umem_reg_cmd_build() - fill the CREATE_UMEM command in @cmd->in.
 *
 * Writes the general-object header (create opcode + UMEM object type),
 * the umem descriptor (MTT count, log page size relative to the adapter
 * page shift, page offset) and populates the trailing MTT array from the
 * pinned umem pages. The MTTs are marked readable, and writable only when
 * the underlying umem was pinned writable.
 */
static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
				    struct devx_umem *obj,
				    struct devx_umem_reg_cmd *cmd)
{
	void *umem;
	__be64 *mtt;

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, cmd->in, obj_type, MLX5_OBJ_TYPE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	/* Firmware expects the page size relative to MLX5_ADAPTER_PAGE_SHIFT. */
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
			     MLX5_IB_MTT_READ);
}
/*
 * Register a user memory region as a firmware UMEM object.
 *
 * Pins the region (devx_umem_get), builds and executes the CREATE_UMEM
 * command, records the matching destroy command in obj->dinbox for later
 * cleanup, and returns the firmware object id to userspace.
 *
 * Error unwinding is strictly LIFO: destroy the firmware object if the
 * copy to userspace fails, release the pinned umem if the command fails,
 * and free @obj last. cmd.in itself is bundle-scoped and needs no
 * explicit free.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	/* Only contexts that were granted a DEVX uid may register umems. */
	if (!c->devx_uid)
		return -EPERM;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	devx_umem_reg_cmd_build(dev, obj, &cmd);

	/* Stamp the caller's uid so firmware enforces ownership. */
	MLX5_SET(general_obj_in_cmd_hdr, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	/* Derive the destroy command now so cleanup never has to guess. */
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
	if (err)
		goto err_umem_destroy;

	return 0;

err_umem_destroy:
	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}
  869. static int devx_umem_cleanup(struct ib_uobject *uobject,
  870. enum rdma_remove_reason why)
  871. {
  872. struct devx_umem *obj = uobject->object;
  873. u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
  874. int err;
  875. err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
  876. if (ib_is_destroy_retryable(err, why, uobject))
  877. return err;
  878. ib_umem_release(obj->umem);
  879. kfree(obj);
  880. return 0;
  881. }
/*
 * UMEM_REG: register user memory as a firmware UMEM object.
 * Inputs: new-object handle, virtual address, length, access flags;
 * output: the firmware object id.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* UMEM_DEREG: destroy a UMEM object; cleanup runs devx_umem_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* QUERY_EQN: translate a user completion vector to a device EQ number. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/* QUERY_UAR: translate a user UAR index to a device UAR index. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

/*
 * OTHER: execute a general firmware command not tied to any uobject.
 * The input is copied into a kernel allocation (UA_ALLOC_AND_COPY) and
 * must be at least a general_obj_in_cmd_hdr.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
/*
 * OBJ_CREATE: create a firmware object from a raw command; the new
 * uobject handle (UVERBS_ACCESS_NEW) owns the created object.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_DESTROY: destroy a DEVX object; cleanup runs devx_obj_cleanup(). */
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

/* OBJ_MODIFY: forward a modify command; needs WRITE access to the handle. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

/* OBJ_QUERY: forward a query command; READ access to the handle suffices. */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));
/* Handle-less methods that operate on the context rather than an object. */
DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN));

/* IDR-backed DEVX object type; devx_obj_cleanup() destroys instances. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY));

/* IDR-backed UMEM object type; devx_umem_cleanup() destroys instances. */
DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

/* Root of the DEVX uverbs tree registered by the main mlx5_ib driver. */
DECLARE_UVERBS_OBJECT_TREE(devx_objects,
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ),
			   &UVERBS_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM));
/* Expose the DEVX uverbs object tree so the main driver can register it. */
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void)
{
	return &devx_objects;
}