// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>
MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
/* autoload this module when a socket with protocol 1 (CAN_RAW) is created */
MODULE_ALIAS("can-proto-1");

/* minimum sockaddr_can length: everything up to and including can_ifindex */
#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/* a can_mask of 0 matches every CAN ID (used for the default filter) */
#define MASK_ALL 0

/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket, is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
/* Per-CPU bookkeeping used by raw_rcv() to detect that the same skb has
 * already been delivered to this socket via another matching filter.
 */
struct uniqframe {
	int skbcnt;			/* skb counter snapshot from can_skb_prv() */
	const struct sk_buff *skb;	/* last skb seen on this CPU */
	unsigned int join_rx_count;	/* nr of filters that matched this skb */
};
/* Per-socket state of a CAN_RAW socket. struct sock is the first member
 * so raw_sk() can convert by a simple cast.
 */
struct raw_sock {
	struct sock sk;
	int bound;			/* set once bind() succeeded */
	int ifindex;			/* bound interface index (0 = all devices) */
	struct net_device *dev;		/* bound device, NULL when unbound/all */
	netdevice_tracker dev_tracker;	/* reference tracker for dev */
	struct list_head notifier;	/* entry in raw_notifier_list */
	int loopback;			/* CAN_RAW_LOOPBACK sockopt */
	int recv_own_msgs;		/* CAN_RAW_RECV_OWN_MSGS sockopt */
	int fd_frames;			/* CAN_RAW_FD_FRAMES sockopt */
	int xl_frames;			/* CAN_RAW_XL_FRAMES sockopt */
	struct can_raw_vcid_opts raw_vcid_opts;
	canid_t tx_vcid_shifted;	/* VCID values pre-shifted by */
	canid_t rx_vcid_shifted;	/* CANXL_VCID_OFFSET for direct use */
	canid_t rx_vcid_mask_shifted;	/* against canxl_frame::prio */
	int join_filters;		/* CAN_RAW_JOIN_FILTERS sockopt */
	int count;			/* number of active filters */
	struct can_filter dfilter;	/* default/single filter */
	struct can_filter *filter;	/* pointer to filter(s) */
	can_err_mask_t err_mask;	/* CAN_RAW_ERR_FILTER sockopt */
	struct uniqframe __percpu *uniq; /* per-CPU duplicate detection */
};
/* List of all raw sockets interested in netdevice notifications plus the
 * spinlock serializing access. raw_busy_notifier marks the socket that
 * raw_notifier() is currently processing outside the lock, so that
 * raw_release() can wait for it before tearing the socket down.
 */
static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;
/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	/* compile-time check that sockaddr_can + flags word fit into skb->cb */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}
/* Convert a generic sock to its raw_sock container; valid because
 * struct sock is the first member of struct raw_sock.
 */
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
/* raw_rcv - deliver a filter-matched CAN frame to the raw socket
 * @oskb: the matched skb (shared, must not be modified)
 * @data: the struct sock this filter was registered for
 *
 * Callback registered via can_rx_register() for each active filter.
 * Applies socket-level policy (own-message suppression, CAN FD/XL
 * acceptance, VCID filtering, duplicate elimination) and enqueues a
 * clone of the skb on the socket receive queue for raw_recvmsg().
 */
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if (!ro->fd_frames && can_is_canfd_skb(oskb))
		return;

	if (can_is_canxl_skb(oskb)) {
		struct canxl_frame *cxl = (struct canxl_frame *)oskb->data;

		/* make sure to not pass oversized frames to the socket */
		if (!ro->xl_frames)
			return;

		/* filter CAN XL VCID content */
		if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_RX_FILTER) {
			/* apply VCID filter if user enabled the filter */
			if ((cxl->prio & ro->rx_vcid_mask_shifted) !=
			    (ro->rx_vcid_shifted & ro->rx_vcid_mask_shifted))
				return;
		} else {
			/* no filter => do not forward VCID tagged frames */
			if (cxl->prio & CANXL_VCID_MASK)
				return;
		}
	}

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		/* first match for this skb on this CPU: remember it */
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
/* raw_enable_filters - register raw_rcv() for each filter in the list
 *
 * Registers @count filters on @dev (NULL = all devices). On failure of
 * any registration, all previously registered filters of this call are
 * unregistered again, so the operation is all-or-nothing.
 *
 * Returns 0 on success or the can_rx_register() error code.
 */
static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}
  208. static int raw_enable_errfilter(struct net *net, struct net_device *dev,
  209. struct sock *sk, can_err_mask_t err_mask)
  210. {
  211. int err = 0;
  212. if (err_mask)
  213. err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
  214. raw_rcv, sk, "raw", sk);
  215. return err;
  216. }
  217. static void raw_disable_filters(struct net *net, struct net_device *dev,
  218. struct sock *sk, struct can_filter *filter,
  219. int count)
  220. {
  221. int i;
  222. for (i = 0; i < count; i++)
  223. can_rx_unregister(net, dev, filter[i].can_id,
  224. filter[i].can_mask, raw_rcv, sk);
  225. }
  226. static inline void raw_disable_errfilter(struct net *net,
  227. struct net_device *dev,
  228. struct sock *sk,
  229. can_err_mask_t err_mask)
  230. {
  231. if (err_mask)
  232. can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
  233. raw_rcv, sk);
  234. }
  235. static inline void raw_disable_allfilters(struct net *net,
  236. struct net_device *dev,
  237. struct sock *sk)
  238. {
  239. struct raw_sock *ro = raw_sk(sk);
  240. raw_disable_filters(net, dev, sk, ro->filter, ro->count);
  241. raw_disable_errfilter(net, dev, sk, ro->err_mask);
  242. }
  243. static int raw_enable_allfilters(struct net *net, struct net_device *dev,
  244. struct sock *sk)
  245. {
  246. struct raw_sock *ro = raw_sk(sk);
  247. int err;
  248. err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
  249. if (!err) {
  250. err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
  251. if (err)
  252. raw_disable_filters(net, dev, sk, ro->filter,
  253. ro->count);
  254. }
  255. return err;
  256. }
/* raw_notify - handle a netdevice event for one raw socket
 * @ro: the socket being notified
 * @msg: the netdevice notifier event (NETDEV_UNREGISTER / NETDEV_DOWN)
 * @dev: the device the event refers to
 *
 * Ignores events from other network namespaces or for devices the
 * socket is not bound to. On NETDEV_UNREGISTER the socket is fully
 * unbound (filters removed, device reference dropped); on NETDEV_DOWN
 * only an ENETDOWN error is signalled to the socket.
 */
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->dev != dev)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound) {
			raw_disable_allfilters(dev_net(dev), dev, sk);
			netdev_put(dev, &ro->dev_tracker);
		}

		/* the filter list was dynamically allocated for count > 1 */
		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->dev = NULL;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}
/* raw_notifier - netdevice notifier callback for the CAN_RAW protocol
 *
 * Walks raw_notifier_list and forwards relevant events to each socket
 * via raw_notify(). The lock is dropped around raw_notify() (which may
 * sleep in lock_sock()); raw_busy_notifier marks the entry currently
 * being processed so raw_release() can wait for it before freeing.
 */
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}
/* raw_init - initialize per-socket state when a CAN_RAW socket is created
 *
 * Installs the default catch-all single filter, default option values,
 * allocates the per-CPU duplicate-detection state and links the socket
 * into the netdevice notifier list.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;
	ro->dev = NULL;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->xl_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}
/* raw_release - tear down a CAN_RAW socket on close()
 *
 * Waits until raw_notifier() is done with this socket, unlinks it from
 * the notifier list, removes all filter registrations, drops the device
 * reference, frees the dynamic filter list and the per-CPU state, and
 * finally releases the sock reference.
 */
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	/* wait until raw_notifier() has finished processing this socket */
	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	rtnl_lock();
	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->dev) {
			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
			netdev_put(ro->dev, &ro->dev_tracker);
		} else {
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	/* the filter list was dynamically allocated for count > 1 */
	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->dev = NULL;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	rtnl_unlock();

	sock_put(sk);

	return 0;
}
/* raw_bind - bind the socket to a CAN interface (or all interfaces)
 *
 * A can_ifindex of 0 binds to all CAN devices. The currently configured
 * filters are registered on the new device before the old registrations
 * are removed, so a failing bind leaves the old binding intact.
 * Re-binding to the same ifindex is a no-op. If the chosen device is
 * down, an ENETDOWN error is signalled after binding.
 *
 * Returns 0 on success or a negative error code (-EINVAL, -ENODEV, or
 * a filter registration error).
 */
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct net_device *dev = NULL;
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	rtnl_lock();
	lock_sock(sk);

	/* binding to the already bound interface is a no-op */
	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			err = -ENODEV;
			goto out_put_dev;
		}

		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		if (err)
			goto out_put_dev;

	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->dev) {
				raw_disable_allfilters(dev_net(ro->dev),
						       ro->dev, sk);
				/* drop reference to old ro->dev */
				netdev_put(ro->dev, &ro->dev_tracker);
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
		/* bind() ok -> hold a reference for new ro->dev */
		ro->dev = dev;
		if (ro->dev)
			netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL);
	}

 out_put_dev:
	/* remove potential reference from dev_get_by_index() */
	dev_put(dev);
 out:
	release_sock(sk);
	rtnl_unlock();

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}
  449. static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
  450. int peer)
  451. {
  452. struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
  453. struct sock *sk = sock->sk;
  454. struct raw_sock *ro = raw_sk(sk);
  455. if (peer)
  456. return -EOPNOTSUPP;
  457. memset(addr, 0, RAW_MIN_NAMELEN);
  458. addr->can_family = AF_CAN;
  459. addr->can_ifindex = ro->ifindex;
  460. return RAW_MIN_NAMELEN;
  461. }
/* raw_setsockopt - handle SOL_CAN_RAW socket options
 *
 * CAN_RAW_FILTER and CAN_RAW_ERR_FILTER follow the same pattern as
 * raw_bind(): new registrations are created before old ones are removed,
 * so a failure leaves the previous configuration fully intact.
 * The simple integer options are copied into the socket directly.
 *
 * Returns 0 on success or a negative error code.
 */
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter; /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int fd_frames;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		/* a bound device may have gone away in the meantime */
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		/* a bound device may have gone away in the meantime */
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !fd_frames)
			return -EINVAL;

		ro->fd_frames = fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_XL_VCID_OPTS:
		if (optlen != sizeof(ro->raw_vcid_opts))
			return -EINVAL;

		if (copy_from_sockptr(&ro->raw_vcid_opts, optval, optlen))
			return -EFAULT;

		/* prepare 32 bit values for handling in hot path */
		ro->tx_vcid_shifted = ro->raw_vcid_opts.tx_vcid << CANXL_VCID_OFFSET;
		ro->rx_vcid_shifted = ro->raw_vcid_opts.rx_vcid << CANXL_VCID_OFFSET;
		ro->rx_vcid_mask_shifted = ro->raw_vcid_opts.rx_vcid_mask << CANXL_VCID_OFFSET;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
/* raw_getsockopt - read back SOL_CAN_RAW socket options
 *
 * CAN_RAW_FILTER and CAN_RAW_XL_VCID_OPTS report -ERANGE with the
 * required size in *optlen when the user buffer is too small; the
 * remaining options copy out (a truncation of) a plain int value.
 *
 * Returns 0 on success or a negative error code.
 */
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER: {
		int err = 0;

		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer to small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;
	}
	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_XL_VCID_OPTS: {
		int err = 0;

		/* user space buffer to small for VCID opts? */
		if (len < sizeof(ro->raw_vcid_opts)) {
			/* return -ERANGE and needed space in optlen */
			err = -ERANGE;
			if (put_user(sizeof(ro->raw_vcid_opts), optlen))
				err = -EFAULT;
		} else {
			if (len > sizeof(ro->raw_vcid_opts))
				len = sizeof(ro->raw_vcid_opts);
			if (copy_to_user(optval, &ro->raw_vcid_opts, len))
				err = -EFAULT;
		}
		if (!err)
			err = put_user(len, optlen);
		return err;
	}
	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
/* raw_put_canxl_vcid - apply the socket's TX VCID policy to an outgoing
 * CAN XL frame: sanitize the prio field, then either pass the VCID
 * through, clear it, or overwrite it with the configured value.
 */
static void raw_put_canxl_vcid(struct raw_sock *ro, struct sk_buff *skb)
{
	struct canxl_frame *cxl = (struct canxl_frame *)skb->data;

	/* sanitize non CAN XL bits */
	cxl->prio &= (CANXL_PRIO_MASK | CANXL_VCID_MASK);

	/* clear VCID in CAN XL frame if pass through is disabled */
	if (!(ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_PASS))
		cxl->prio &= CANXL_PRIO_MASK;

	/* set VCID in CAN XL frame if enabled */
	if (ro->raw_vcid_opts.flags & CAN_RAW_XL_VCID_TX_SET) {
		cxl->prio &= CANXL_PRIO_MASK;
		cxl->prio |= ro->tx_vcid_shifted;
	}
}
  729. static unsigned int raw_check_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
  730. {
  731. /* Classical CAN -> no checks for flags and device capabilities */
  732. if (can_is_can_skb(skb))
  733. return CAN_MTU;
  734. /* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
  735. if (ro->fd_frames && can_is_canfd_skb(skb) &&
  736. (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
  737. return CANFD_MTU;
  738. /* CAN XL -> needs to be enabled and a CAN XL device */
  739. if (ro->xl_frames && can_is_canxl_skb(skb) &&
  740. can_is_canxl_dev_mtu(mtu))
  741. return CANXL_MTU;
  742. return 0;
  743. }
/* raw_sendmsg - transmit one CAN (CC/FD/XL) frame
 *
 * The destination device is taken from msg_name if present, otherwise
 * from the bound interface. The frame content is copied into a freshly
 * allocated skb first and validated afterwards via raw_check_txframe().
 *
 * Returns the number of bytes sent or a negative error code.
 */
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned int txmtu;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	err = -EINVAL;

	/* check for valid CAN (CC/FD/XL) frame content */
	txmtu = raw_check_txframe(ro, skb, dev->mtu);
	if (!txmtu)
		goto free_skb;

	/* only CANXL: clear/forward/set VCID value */
	if (txmtu == CANXL_MTU)
		raw_put_canxl_vcid(ro, skb);

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = READ_ONCE(sk->sk_priority);
	skb->mark = READ_ONCE(sk->sk_mark);
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	/* can_send() consumes the skb on both success and failure */
	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
/* raw_recvmsg - receive one CAN frame from the socket queue
 *
 * Copies the next queued frame (possibly truncated, signalled via
 * MSG_TRUNC) to user space, fills msg_name with the sockaddr_can that
 * raw_rcv() stored in skb->cb, and merges the MSG_DONTROUTE/MSG_CONFIRM
 * flags recorded at receive time.
 *
 * Returns the frame size or a negative error code.
 */
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}
/* raw_sock_no_ioctlcmd - ioctl handler stub for the socket layer */
static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
/* socket-layer operations of the CAN_RAW protocol; unsupported
 * operations are mapped to the generic sock_no_*() stubs
 */
static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = raw_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
};

/* proto definition: per-socket allocation size and init hook */
static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

/* registration record handed to the PF_CAN core */
static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

/* netdevice notifier feeding raw_notifier() */
static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};
  884. static __init int raw_module_init(void)
  885. {
  886. int err;
  887. pr_info("can: raw protocol\n");
  888. err = register_netdevice_notifier(&canraw_notifier);
  889. if (err)
  890. return err;
  891. err = can_proto_register(&raw_can_proto);
  892. if (err < 0) {
  893. pr_err("can: registration of raw protocol failed\n");
  894. goto register_proto_failed;
  895. }
  896. return 0;
  897. register_proto_failed:
  898. unregister_netdevice_notifier(&canraw_notifier);
  899. return err;
  900. }
/* raw_module_exit - unregister the protocol first, then the netdevice
 * notifier (reverse order of raw_module_init()).
 */
static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);