qrtr.c
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff

/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
} __packed;

#define QRTR_FLAGS_CONFIRM_RX	BIT(0)

struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;

	u8 type;
	u8 confirm_rx;
};

#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
					sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	cancel_work_sync(&node->work);
	skb_queue_purge(&node->rx_queue);
	kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			     int type, struct sockaddr_qrtr *from,
			     struct sockaddr_qrtr *to)
{
	struct qrtr_hdr_v1 *hdr;
	size_t len = skb->len;
	int rc;

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(from->sq_node);
	hdr->src_port_id = cpu_to_le32(from->sq_port);
	if (to->sq_port == QRTR_PORT_CTRL) {
		hdr->dst_node_id = cpu_to_le32(node->nid);
		hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	} else {
		hdr->dst_node_id = cpu_to_le32(to->sq_node);
		hdr->dst_port_id = cpu_to_le32(to->sq_port);
	}

	hdr->size = cpu_to_le32(len);
	hdr->confirm_rx = 0;

	rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));

	if (!rc) {
		mutex_lock(&node->ep_lock);
		rc = -ENODEV;
		if (node->ep)
			rc = node->ep->xmit(node->ep, skb);
		else
			kfree_skb(skb);
		mutex_unlock(&node->ep_lock);
	}

	return rc;
}

/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;

	mutex_lock(&qrtr_node_lock);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	mutex_unlock(&qrtr_node_lock);

	return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	mutex_lock(&qrtr_node_lock);
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	mutex_unlock(&qrtr_node_lock);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr_v1 *v1;
	const struct qrtr_hdr_v2 *v2;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	unsigned int size;
	unsigned int ver;
	size_t hdrlen;

	if (len == 0 || len & 3)
		return -EINVAL;

	skb = __netdev_alloc_skb(NULL, len, GFP_ATOMIC | __GFP_NOWARN);
	if (!skb)
		return -ENOMEM;

	cb = (struct qrtr_cb *)skb->cb;

	/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;

	switch (ver) {
	case QRTR_PROTO_VER_1:
		if (len < sizeof(*v1))
			goto err;
		v1 = data;
		hdrlen = sizeof(*v1);

		cb->type = le32_to_cpu(v1->type);
		cb->src_node = le32_to_cpu(v1->src_node_id);
		cb->src_port = le32_to_cpu(v1->src_port_id);
		cb->confirm_rx = !!v1->confirm_rx;
		cb->dst_node = le32_to_cpu(v1->dst_node_id);
		cb->dst_port = le32_to_cpu(v1->dst_port_id);

		size = le32_to_cpu(v1->size);
		break;
	case QRTR_PROTO_VER_2:
		if (len < sizeof(*v2))
			goto err;
		v2 = data;
		hdrlen = sizeof(*v2) + v2->optlen;

		cb->type = v2->type;
		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		cb->src_node = le16_to_cpu(v2->src_node_id);
		cb->src_port = le16_to_cpu(v2->src_port_id);
		cb->dst_node = le16_to_cpu(v2->dst_node_id);
		cb->dst_port = le16_to_cpu(v2->dst_port_id);

		if (cb->src_port == (u16)QRTR_PORT_CTRL)
			cb->src_port = QRTR_PORT_CTRL;
		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
			cb->dst_port = QRTR_PORT_CTRL;

		size = le32_to_cpu(v2->size);
		break;
	default:
		pr_err("qrtr: Invalid version %d\n", ver);
		goto err;
	}

	if (len != ALIGN(size, 4) + hdrlen)
		goto err;

	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
		goto err;

	skb_put_data(skb, data + hdrlen, size);

	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);

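/*
 * Illustrative sketch (not part of the original file): an endpoint driver's
 * receive path would typically hand each complete, word-aligned packet to
 * qrtr_endpoint_post(). The "my_ep_dev" type and its fields below are
 * hypothetical; only the qrtr_endpoint_post() call itself comes from this
 * file's API.
 *
 *	static void my_ep_rx(struct my_ep_dev *mdev, const void *buf, size_t len)
 *	{
 *		int ret;
 *
 *		ret = qrtr_endpoint_post(&mdev->ep, buf, len);
 *		if (ret < 0)
 *			dev_err(mdev->dev, "dropping malformed qrtr packet\n");
 *	}
 */
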
/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 *
 * Returns newly allocated sk_buff, or NULL on failure
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
{
	const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
	struct sk_buff *skb;

	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
	*pkt = skb_put_zero(skb, pkt_len);

	return skb;
}

static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct qrtr_ctrl_pkt *pkt;
	struct sockaddr_qrtr dst;
	struct sockaddr_qrtr src;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		struct qrtr_sock *ipc;
		struct qrtr_cb *cb;
		int confirm;

		cb = (struct qrtr_cb *)skb->cb;
		src.sq_node = cb->src_node;
		src.sq_port = cb->src_port;
		dst.sq_node = cb->dst_node;
		dst.sq_port = cb->dst_port;
		confirm = !!cb->confirm_rx;

		qrtr_node_assign(node, cb->src_node);

		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc) {
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			skb = qrtr_alloc_ctrl_packet(&pkt);
			if (!skb)
				break;

			pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
			pkt->client.node = cpu_to_le32(dst.sq_node);
			pkt->client.port = cpu_to_le32(dst.sq_port);

			if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
					      &dst, &src))
				break;
		}
	}
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 *
 * The specified endpoint must have the xmit function pointer set on call.
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);

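/*
 * Illustrative sketch (not part of the original file): a transport driver
 * embeds a struct qrtr_endpoint, fills in its xmit callback and registers it.
 * "my_ep_dev", "my_ep_probe" and "my_ep_xmit" are hypothetical names;
 * passing QRTR_EP_NID_AUTO lets the node id be learned from the first packet
 * received over the link.
 *
 *	static int my_ep_probe(struct my_ep_dev *mdev)
 *	{
 *		mdev->ep.xmit = my_ep_xmit;
 *		return qrtr_endpoint_register(&mdev->ep, QRTR_EP_NID_AUTO);
 *	}
 *
 *	static void my_ep_remove(struct my_ep_dev *mdev)
 *	{
 *		qrtr_endpoint_unregister(&mdev->ep);
 *	}
 */
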
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
	}

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);

/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	u32 min_port;
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		min_port = QRTR_MIN_EPH_SOCKET;
		rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
		if (!rc)
			*port = min_port;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		min_port = 0;
		rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
	} else {
		min_port = *port;
		rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
		if (!rc)
			*port = min_port;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}

/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	int id;

	mutex_lock(&qrtr_port_lock);
	idr_for_each_entry(&qrtr_ports, ipc, id) {
		/* Don't reset control port */
		if (id == 0)
			continue;

		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		ipc->sk.sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	mutex_unlock(&qrtr_port_lock);
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		if (ipc)
			qrtr_port_put(ipc); /* drop the lookup reference */
		kfree_skb(skb);
		return -ENODEV;
	}

	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(NULL, skb, type, from, to);

	return 0;
}

static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type = QRTR_TYPE_DATA;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		if (addr->sq_port != QRTR_PORT_CTRL &&
		    qrtr_local_nid != QRTR_NODE_BCAST) {
			release_sock(sk);
			return -ENOTCONN;
		}
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
		enqueue_fn = qrtr_node_enqueue;
	}

	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &type, 4);
		type = le32_to_cpu(type);
	}

	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}

static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		/* There is an anonymous 2-byte hole after sq_family,
		 * make sure to clear it.
		 */
		memset(addr, 0, sizeof(*addr));

		cb = (struct qrtr_cb *)skb->cb;
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}

static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return sizeof(qaddr);
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock_orphan(sk);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}

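/*
 * Illustrative sketch (not part of the original file): from user space an
 * AF_QIPCRTR socket behaves like an ordinary datagram socket addressed by
 * node and port. The struct below follows <linux/qrtr.h>; "peer_node",
 * "peer_port", "buf" and "len" are placeholders, and the snippet is only an
 * assumption about typical usage, not code from this driver.
 *
 *	struct sockaddr_qrtr sq = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node = peer_node,	// e.g. learned via the name service
 *		.sq_port = peer_port,
 *	};
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *
 *	if (sendto(fd, buf, len, 0, (struct sockaddr *)&sq, sizeof(sq)) < 0)
 *		perror("sendto");
 */
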
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};

static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR,
				  qrtr_addr_doit, NULL, 0);
	if (rc) {
		sock_unregister(qrtr_family.family);
		proto_unregister(&qrtr_proto);
	}

	return rc;
}
postcore_initcall(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);