/* net/can/j1939/socket.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (c) 2010-2011 EIA Electronics,
  3. // Pieter Beyens <pieter.beyens@eia.be>
  4. // Copyright (c) 2010-2011 EIA Electronics,
  5. // Kurt Van Dijck <kurt.van.dijck@eia.be>
  6. // Copyright (c) 2018 Protonic,
  7. // Robin van der Gracht <robin@protonic.nl>
  8. // Copyright (c) 2017-2019 Pengutronix,
  9. // Marc Kleine-Budde <kernel@pengutronix.de>
  10. // Copyright (c) 2017-2019 Pengutronix,
  11. // Oleksij Rempel <kernel@pengutronix.de>
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/can/can-ml.h>
  14. #include <linux/can/core.h>
  15. #include <linux/can/skb.h>
  16. #include <linux/errqueue.h>
  17. #include <linux/if_arp.h>
  18. #include "j1939-priv.h"
  19. #define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
  20. /* conversion function between struct sock::sk_priority from linux and
  21. * j1939 priority field
  22. */
  23. static inline priority_t j1939_prio(u32 sk_priority)
  24. {
  25. sk_priority = min(sk_priority, 7U);
  26. return 7 - sk_priority;
  27. }
/* inverse of j1939_prio(): map a J1939 priority (0..7) back to sk_priority */
static inline u32 j1939_to_sk_priority(priority_t prio)
{
	return 7 - prio;
}
/* test whether a PGN is inside the valid range and thus to be evaluated */
static inline bool j1939_pgn_is_valid(pgn_t pgn)
{
	return pgn <= J1939_PGN_MAX;
}
  37. /* test function to avoid non-zero DA placeholder for pdu1 pgn's */
  38. static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
  39. {
  40. if (j1939_pgn_is_pdu1(pgn))
  41. return !(pgn & 0xff);
  42. else
  43. return true;
  44. }
/* account one more not-yet-completed transmission on this socket */
static inline void j1939_sock_pending_add(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	atomic_inc(&jsk->skb_pending);
}
/* number of pending (not yet completed) transmissions on this socket */
static int j1939_sock_pending_get(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	return atomic_read(&jsk->skb_pending);
}
/* drop one pending transmission; wake up waiters (see j1939_sk_release())
 * once the counter reaches zero
 */
void j1939_sock_pending_del(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* atomic_dec_return returns the new value */
	if (!atomic_dec_return(&jsk->skb_pending))
		wake_up(&jsk->waitq); /* no pending SKB's */
}
/* mark the socket bound and link it into the per-interface socket list;
 * an extra j1939_priv reference is held on behalf of the list entry
 */
static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	jsk->state |= J1939_SOCK_BOUND;
	j1939_priv_get(priv);

	write_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	write_unlock_bh(&priv->j1939_socks_lock);
}
/* unlink the socket from the per-interface list and drop the j1939_priv
 * reference taken by j1939_jsk_add(); clears the BOUND state last
 */
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	write_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	write_unlock_bh(&priv->j1939_socks_lock);
	j1939_priv_put(priv);

	jsk->state &= ~J1939_SOCK_BOUND;
}
/* append a TX session to the socket's session queue and bump the pending
 * counter; returns true if the queue was empty before, i.e. the caller
 * is responsible for activating this session
 */
static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	j1939_session_get(session); /* reference owned by the queue entry */
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}
/* return the last queued session if it still expects more data to be
 * queued (total_queued_size < total_message_size), NULL otherwise;
 * takes an extra session reference for the caller
 */
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL;
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}
/* flush the socket's whole session queue, failing each session with @err */
static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session); /* drop the queue's reference */
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
/* dequeue the finished @session (only if it is still the queue head) and
 * activate the next queued session, retrying past sessions that fail to
 * activate; caller must hold sk_session_queue_lock
 */
static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX-Session don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first); /* drop the queue's reference */
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (j1939_session_activate(first)) {
		netdev_warn_once(first->priv->ndev,
				 "%s: 0x%p: Identical session is already activated.\n",
				 __func__, first);
		first->err = -EBUSY;
		goto activate_next;
	} else {
		/* Give receiver some time (arbitrarily chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + get_random_u32_below(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}
/* lock-taking wrapper around j1939_sk_queue_activate_next_locked() */
void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	/* RX sessions are not (yet) tied to a socket */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
/* check whether a received packet's addresses match this socket's
 * bind()/connect() state; promiscuous sockets accept everything
 */
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		/* NAME-addressed: our bound NAME must be the destination */
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packets that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packets that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}
  214. /* matches skb control buffer (addr) with a j1939 filter */
  215. static bool j1939_sk_match_filter(struct j1939_sock *jsk,
  216. const struct j1939_sk_buff_cb *skcb)
  217. {
  218. const struct j1939_filter *f;
  219. int nfilter;
  220. spin_lock_bh(&jsk->filters_lock);
  221. f = jsk->filters;
  222. nfilter = jsk->nfilters;
  223. if (!nfilter)
  224. /* receive all when no filters are assigned */
  225. goto filter_match_found;
  226. for (; nfilter; ++f, --nfilter) {
  227. if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
  228. continue;
  229. if ((skcb->addr.sa & f->addr_mask) != f->addr)
  230. continue;
  231. if ((skcb->addr.src_name & f->name_mask) != f->name)
  232. continue;
  233. goto filter_match_found;
  234. }
  235. spin_unlock_bh(&jsk->filters_lock);
  236. return false;
  237. filter_match_found:
  238. spin_unlock_bh(&jsk->filters_lock);
  239. return true;
  240. }
  241. static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
  242. const struct j1939_sk_buff_cb *skcb)
  243. {
  244. if (!(jsk->state & J1939_SOCK_BOUND))
  245. return false;
  246. if (!j1939_sk_match_dst(jsk, skcb))
  247. return false;
  248. if (!j1939_sk_match_filter(jsk, skcb))
  249. return false;
  250. return true;
  251. }
/* deliver a clone of @oskb to one matching socket's receive queue */
static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;

	/* never loop a packet back to its own sending socket */
	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	/* mark locally originated traffic so recvmsg() can report it */
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
		kfree_skb(skb);
}
/* return true if at least one bound socket on @priv would accept a
 * packet with control buffer @skcb
 */
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	read_unlock_bh(&priv->j1939_socks_lock);

	return match;
}
/* fan a received packet out to every matching socket on this interface */
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	read_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	read_unlock_bh(&priv->j1939_socks_lock);
}
/* final destructor (sk->sk_destruct): drop the priv reference taken at
 * bind() time, then run the generic CAN socket destructor
 */
static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function will be called by the generic networking code, when
	 * the socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}
/* protocol init hook: zero the J1939 part of the socket and set defaults
 * (no address, no PGN, default send priority 6); always returns 0
 */
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	jsk->sk.sk_priority = j1939_to_sk_priority(6); /* J1939 default prio */
	jsk->sk.sk_reuse = 1; /* per default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);
	spin_lock_init(&jsk->filters_lock);

	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}
/* common validation for bind()/connect() addresses: length, family,
 * ifindex and a clean PDU1 PGN; 0 on success, negative errno otherwise
 */
static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	/* a valid PDU1 PGN must carry a zero DA placeholder */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}
/* bind() handler: attach the socket to a CAN interface and claim the
 * requested NAME/address as a local ECU. A re-bind on the same interface
 * releases the old ECU claim first; re-binding to another interface is
 * rejected with -EINVAL.
 */
static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* reject non-CAN interfaces */
		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default transmit pgn */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		j1939_netdev_stop(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
/* connect() handler: record the default destination (NAME/address/PGN)
 * for subsequent sendmsg() calls. Requires a prior bind() on the same
 * interface; connecting to broadcast needs SO_BROADCAST.
 */
static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
/* fill a sockaddr_can from socket state; @peer selects the connect()ed
 * peer (dst_name/da) versus our own bound identity (src_name/sa)
 */
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
				       const struct j1939_sock *jsk, int peer)
{
	/* There are two holes (2 bytes and 3 bytes) to clear to avoid
	 * leaking kernel information to user space.
	 */
	memset(addr, 0, J1939_MIN_NAMELEN);

	addr->can_family = AF_CAN;
	addr->can_ifindex = jsk->ifindex;
	addr->can_addr.j1939.pgn = jsk->addr.pgn;
	if (peer) {
		addr->can_addr.j1939.name = jsk->addr.dst_name;
		addr->can_addr.j1939.addr = jsk->addr.da;
	} else {
		addr->can_addr.j1939.name = jsk->addr.src_name;
		addr->can_addr.j1939.addr = jsk->addr.sa;
	}
}
/* getsockname()/getpeername() handler; peer queries require a
 * connect()ed socket. Returns the address length on success.
 */
static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

 failure:
	release_sock(sk);

	return ret;
}
/* release() handler: wait for pending transmissions to drain (aborting
 * them if interrupted by a signal), drop the ECU claim and the netdev
 * usage, then orphan and put the socket
 */
static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		/* interrupted wait -> cancel whatever is still in flight */
		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
/* set or clear @flag in jsk->state from a user-supplied int.
 *
 * Returns the user value on success (callers treat >= 0 as success and
 * may inspect the new setting), or a negative errno.
 * NOTE(review): a negative user value sets the flag but is also returned
 * as-is, which callers then report as an error — confirm this quirk is
 * intended before changing it.
 */
static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}
/* setsockopt() handler for SOL_CAN_J1939:
 * SO_J1939_FILTER    - install (or clear) a receive filter array
 * SO_J1939_PROMISC   - toggle promiscuous reception
 * SO_J1939_ERRQUEUE  - toggle errqueue reporting (purges queue on disable)
 * SO_J1939_SEND_PRIO - set TX priority (0..7; <2 needs CAP_NET_ADMIN)
 */
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		if (!sockptr_is_null(optval) && optlen != 0) {
			struct j1939_filter *f;
			int c;

			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			/* normalize: only the masked bits are compared */
			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		lock_sock(&jsk->sk);
		spin_lock_bh(&jsk->filters_lock);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		spin_unlock_bh(&jsk->filters_lock);
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		/* discard stale entries once reporting is switched off */
		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		/* priorities 0 and 1 are reserved for privileged traffic */
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
/* getsockopt() handler for SOL_CAN_J1939; all supported options are
 * plain 'int' properties (PROMISC, ERRQUEUE, SEND_PRIO)
 */
static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val'
	 * but most sockopt's are 'int' properties, and have 'len' & 'val'
	 * left unchanged, but instead modified 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
 no_copy:
	release_sock(&jsk->sk);

	return ret;
}
/* recvmsg() handler: dequeue one datagram, copy the payload and attach
 * J1939 ancillary data (destination addr/name, priority) plus the
 * source address in msg_name; MSG_ERRQUEUE is routed to the errqueue
 */
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &ret);
	if (!skb)
		return ret;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_cmsgs(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}
/* allocate a TX skb for sendmsg(): reserve CAN skb headroom, copy the
 * user payload and fill the J1939 control buffer from the socket's
 * defaults, overridden by a sendmsg() destination address if given.
 * Returns the skb, or NULL with *errcode set.
 */
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					  struct sock *sk,
					  struct msghdr *msg, size_t size,
					  int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	int ret;

	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data) +
				  sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = ndev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(READ_ONCE(sk->sk_priority));

	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		/* an explicit destination overrides the connect() default */
		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}

		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}
  761. static size_t j1939_sk_opt_stats_get_size(enum j1939_sk_errqueue_type type)
  762. {
  763. switch (type) {
  764. case J1939_ERRQUEUE_RX_RTS:
  765. return
  766. nla_total_size(sizeof(u32)) + /* J1939_NLA_TOTAL_SIZE */
  767. nla_total_size(sizeof(u32)) + /* J1939_NLA_PGN */
  768. nla_total_size(sizeof(u64)) + /* J1939_NLA_SRC_NAME */
  769. nla_total_size(sizeof(u64)) + /* J1939_NLA_DEST_NAME */
  770. nla_total_size(sizeof(u8)) + /* J1939_NLA_SRC_ADDR */
  771. nla_total_size(sizeof(u8)) + /* J1939_NLA_DEST_ADDR */
  772. 0;
  773. default:
  774. return
  775. nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
  776. 0;
  777. }
  778. }
/* build the netlink stats skb for an errqueue event; RX RTS gets the
 * full session description, all other types the acked byte count.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session,
				    enum j1939_sk_errqueue_type type)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(type), GFP_ATOMIC);
	if (!stats)
		return NULL;

	/* J1939_SIMPLE sessions are acked as a whole; multi-packet
	 * transfers report 7 payload bytes per acked packet, capped at
	 * the total message size
	 */
	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	switch (type) {
	case J1939_ERRQUEUE_RX_RTS:
		nla_put_u32(stats, J1939_NLA_TOTAL_SIZE,
			    session->total_message_size);
		nla_put_u32(stats, J1939_NLA_PGN,
			    session->skcb.addr.pgn);
		nla_put_u64_64bit(stats, J1939_NLA_SRC_NAME,
				  session->skcb.addr.src_name, J1939_NLA_PAD);
		nla_put_u64_64bit(stats, J1939_NLA_DEST_NAME,
				  session->skcb.addr.dst_name, J1939_NLA_PAD);
		nla_put_u8(stats, J1939_NLA_SRC_ADDR,
			   session->skcb.addr.sa);
		nla_put_u8(stats, J1939_NLA_DEST_ADDR,
			   session->skcb.addr.da);
		break;
	default:
		nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
	}

	return stats;
}
  813. static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
  814. enum j1939_sk_errqueue_type type)
  815. {
  816. struct j1939_priv *priv = session->priv;
  817. struct j1939_sock *jsk;
  818. struct sock_exterr_skb *serr;
  819. struct sk_buff *skb;
  820. char *state = "UNK";
  821. u32 tsflags;
  822. int err;
  823. jsk = j1939_sk(sk);
  824. if (!(jsk->state & J1939_SOCK_ERRQUEUE))
  825. return;
  826. tsflags = READ_ONCE(sk->sk_tsflags);
  827. switch (type) {
  828. case J1939_ERRQUEUE_TX_ACK:
  829. if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
  830. return;
  831. break;
  832. case J1939_ERRQUEUE_TX_SCHED:
  833. if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
  834. return;
  835. break;
  836. case J1939_ERRQUEUE_TX_ABORT:
  837. break;
  838. case J1939_ERRQUEUE_RX_RTS:
  839. fallthrough;
  840. case J1939_ERRQUEUE_RX_DPO:
  841. fallthrough;
  842. case J1939_ERRQUEUE_RX_ABORT:
  843. if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
  844. return;
  845. break;
  846. default:
  847. netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
  848. }
  849. skb = j1939_sk_get_timestamping_opt_stats(session, type);
  850. if (!skb)
  851. return;
  852. skb->tstamp = ktime_get_real();
  853. BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
  854. serr = SKB_EXT_ERR(skb);
  855. memset(serr, 0, sizeof(*serr));
  856. switch (type) {
  857. case J1939_ERRQUEUE_TX_ACK:
  858. serr->ee.ee_errno = ENOMSG;
  859. serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
  860. serr->ee.ee_info = SCM_TSTAMP_ACK;
  861. state = "TX ACK";
  862. break;
  863. case J1939_ERRQUEUE_TX_SCHED:
  864. serr->ee.ee_errno = ENOMSG;
  865. serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
  866. serr->ee.ee_info = SCM_TSTAMP_SCHED;
  867. state = "TX SCH";
  868. break;
  869. case J1939_ERRQUEUE_TX_ABORT:
  870. serr->ee.ee_errno = session->err;
  871. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  872. serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
  873. state = "TX ABT";
  874. break;
  875. case J1939_ERRQUEUE_RX_RTS:
  876. serr->ee.ee_errno = ENOMSG;
  877. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  878. serr->ee.ee_info = J1939_EE_INFO_RX_RTS;
  879. state = "RX RTS";
  880. break;
  881. case J1939_ERRQUEUE_RX_DPO:
  882. serr->ee.ee_errno = ENOMSG;
  883. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  884. serr->ee.ee_info = J1939_EE_INFO_RX_DPO;
  885. state = "RX DPO";
  886. break;
  887. case J1939_ERRQUEUE_RX_ABORT:
  888. serr->ee.ee_errno = session->err;
  889. serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
  890. serr->ee.ee_info = J1939_EE_INFO_RX_ABORT;
  891. state = "RX ABT";
  892. break;
  893. }
  894. serr->opt_stats = true;
  895. if (tsflags & SOF_TIMESTAMPING_OPT_ID)
  896. serr->ee.ee_data = session->tskey;
  897. netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
  898. __func__, session, session->tskey, state);
  899. err = sock_queue_err_skb(sk, skb);
  900. if (err)
  901. kfree_skb(skb);
  902. };
  903. void j1939_sk_errqueue(struct j1939_session *session,
  904. enum j1939_sk_errqueue_type type)
  905. {
  906. struct j1939_priv *priv = session->priv;
  907. struct j1939_sock *jsk;
  908. if (session->sk) {
  909. /* send TX notifications to the socket of origin */
  910. __j1939_sk_errqueue(session, session->sk, type);
  911. return;
  912. }
  913. /* spread RX notifications to all sockets subscribed to this session */
  914. read_lock_bh(&priv->j1939_socks_lock);
  915. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  916. if (j1939_sk_recv_match_one(jsk, &session->skcb))
  917. __j1939_sk_errqueue(session, &jsk->sk, type);
  918. }
  919. read_unlock_bh(&priv->j1939_socks_lock);
  920. };
  921. void j1939_sk_send_loop_abort(struct sock *sk, int err)
  922. {
  923. struct j1939_sock *jsk = j1939_sk(sk);
  924. if (jsk->state & J1939_SOCK_ERRQUEUE)
  925. return;
  926. sk->sk_err = err;
  927. sk_error_report(sk);
  928. }
/* Copy @size bytes of user data from @msg into one or more skb segments
 * and queue them on a (possibly pre-existing) TP session.
 *
 * Returns the number of bytes queued on full or partial success, or a
 * negative error code. A partial count is returned when -EAGAIN (or an
 * interrupted -ERESTARTSYS, mapped to -EINTR) hits after some segments
 * were already queued.
 */
static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
			      struct msghdr *msg, size_t size)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	/* a continuation sendmsg() must supply exactly the bytes still
	 * missing from the announced total message size
	 */
	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	do {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate session if we a
				 * fist in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			/* append segment to the already-created session */
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	} while (todo_size);

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		/* report partial progress if any segment made it through */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}
/* sendmsg() handler for J1939 sockets.
 *
 * Validates socket state (must be bound with a source address) and any
 * destination supplied in msg->msg_name, then hands the payload to
 * j1939_sk_send_loop(). Returns bytes queued or a negative error code.
 */
static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_priv *priv;
	int ifindex;
	int ret;

	lock_sock(sock->sk);
	/* various socket state tests */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EBADFD;
		goto sendmsg_done;
	}

	priv = jsk->priv;
	ifindex = jsk->ifindex;

	if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
		/* no source address assigned yet */
		ret = -EBADFD;
		goto sendmsg_done;
	}

	/* deal with provided destination address info */
	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (msg->msg_namelen < J1939_MIN_NAMELEN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_family != AF_CAN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		/* destination must be on the bound interface (or wildcard) */
		if (addr->can_ifindex && addr->can_ifindex != ifindex) {
			ret = -EBADFD;
			goto sendmsg_done;
		}

		/* a PGN used as destination may not carry flow-control bits */
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
		    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (!addr->can_addr.j1939.name &&
		    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	} else {
		if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	}

	ret = j1939_sk_send_loop(priv, sk, msg, size);

 sendmsg_done:
	release_sock(sock->sk);

	return ret;
}
  1070. void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
  1071. {
  1072. struct j1939_sock *jsk;
  1073. int error_code = ENETDOWN;
  1074. read_lock_bh(&priv->j1939_socks_lock);
  1075. list_for_each_entry(jsk, &priv->j1939_socks, list) {
  1076. jsk->sk.sk_err = error_code;
  1077. if (!sock_flag(&jsk->sk, SOCK_DEAD))
  1078. sk_error_report(&jsk->sk);
  1079. j1939_sk_queue_drop_all(priv, jsk, error_code);
  1080. }
  1081. read_unlock_bh(&priv->j1939_socks_lock);
  1082. }
/* ioctl() stub: J1939 defines no socket-level ioctls. */
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
/* proto_ops for J1939 datagram sockets; unsupported operations are wired
 * to the generic sock_no_*() stubs.
 */
static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
};
/* Protocol descriptor: sizes sock allocations to struct j1939_sock and
 * runs j1939_sk_init() on socket creation.
 */
static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};
/* Registration record tying protocol number CAN_J1939 (SOCK_DGRAM) to
 * the ops and proto above; registered with the CAN core elsewhere.
 */
const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};