/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"
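
/* Find the pkey_index_qp_list entry tracking the QPs that use the
 * P_Key index in *pp on its port.  Returns NULL if no QP has been
 * listed against that index yet.  The per-port list_lock is held
 * only for the walk.
 */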
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);

        return pkey;
}
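
/* Read the P_Key value and subnet prefix for the port and P_Key
 * index described by *pp from the core caches; no MADs are sent.
 */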
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}
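
/* Ask the LSM whether the QP owning qp_sec, and every QP sharing
 * its security structure, may use the given P_Key on the given
 * subnet.  Returns the first non-zero verdict, or 0 if all pass.
 */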
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP and the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}
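
/* Re-check all QPs listed against one P_Key index after a cache
 * change.  QPs failing the LSM check are collected on a local
 * to_error_list under the qp_list_lock, then transitioned to the
 * error state under their own security mutex so the spinlock is
 * never held across qp_to_error().
 */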
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, e.g. after
         * a destroy has failed.
         */
        pp->state = IB_PORT_PKEY_VALID;
}
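
/* Free the LSM security blob, the port/P_Key settings, and the
 * security structure itself.
 */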
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & IB_QP_PORT)
                new_pps->main.port_num = qp_attr->port_num;
        else if (qp_pps)
                new_pps->main.port_num = qp_pps->main.port_num;

        if (qp_attr_mask & IB_QP_PKEY_INDEX)
                new_pps->main.pkey_index = qp_attr->pkey_index;
        else if (qp_pps)
                new_pps->main.pkey_index = qp_pps->main.pkey_index;

        if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
             (qp_attr_mask & IB_QP_PORT)) ||
            (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
                new_pps->main.state = IB_PORT_PKEY_VALID;

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}
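
/* Set up and verify security for a QP handle opened on an existing
 * real QP (see ib_open_qp()).  On success the new structure is
 * linked into the real QP's shared_qp_list so that future changes
 * to the real QP are enforced against this handle as well.
 */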
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);
        if (ret)
                return ret;

        if (!qp->qp_sec)
                return 0;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);
        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}
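
/* Unlink a shared QP handle's security structure from the real
 * QP's shared_qp_list and free it.
 */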
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}
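
/* Allocate and initialize the security structure for a new QP.
 * Devices with no IB ports skip QP security entirely and leave
 * qp->qp_sec NULL.
 */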
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
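
/* QP destruction is split into three phases so that a concurrent
 * P_Key cache change never operates on a freed QP.  A sketch of
 * the intended calling sequence (the real caller is the core QP
 * destroy path):
 *
 *        ib_destroy_qp_security_begin(sec);
 *        ret = <hardware destroy of the QP>;
 *        if (ret)
 *                ib_destroy_qp_security_abort(sec);
 *        else
 *                ib_destroy_qp_security_end(sec);
 *
 * begin() unlists the settings and records how many error-flow
 * completions to wait for, abort() relists them if the destroy
 * failed, and end() waits out any in-flight error flow before
 * freeing the structure.
 */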
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were listed already
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}
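
/* Called by the core cache code when the P_Key table or subnet
 * prefix of a port may have changed; re-checks every QP listed
 * against each tracked P_Key index on that port.
 */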
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}
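
/* Release the per-port P_Key tracking lists when a device goes
 * away.  No QPs should still be listed at this point.
 */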
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}
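
/* Security-checking wrapper around the driver's modify_qp.  If the
 * modify changes the port, P_Key index, or alternate path of a
 * normal QP, the new settings are listed and checked against the
 * LSM before the driver is invoked.  Whichever settings end up
 * unused (the new ones on failure, the old ones on success) are
 * unlisted and freed.
 */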
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list.  When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);
                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);
                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}
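
/* LSM P_Key access check for a (device, port, pkey_index) tuple,
 * used by ib_mad_enforce_security() below.  Non-IB ports always
 * pass.
 */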
static int ib_security_pkey_access(struct ib_device *dev,
                                   u8 port_num,
                                   u16 pkey_index,
                                   void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
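
/* LSM policy-change notifier: re-evaluate whether this MAD agent
 * may manage the subnet and cache the verdict in smp_allowed.
 */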
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}
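
/* Allocate the LSM blob for a new MAD agent.  SMI (QP0) agents are
 * additionally checked for subnet-management permission and register
 * for LSM policy-change notifications to keep smp_allowed current.
 */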
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                goto free_security;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                goto free_security;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;

free_security:
        security_ib_free_security(agent->security);
        return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);

        security_ib_free_security(agent->security);
}
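
/* Per-MAD security check: SMI agents use the cached smp_allowed
 * verdict, all other agents get a P_Key access check against the
 * P_Key index the MAD is using.
 */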
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI) {
                if (!map->agent.smp_allowed)
                        return -EACCES;
                return 0;
        }

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}