protocol.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * Initialization/cleanup for SCTP protocol support.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/sctp/sctp.h>
#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>
#include <net/udp_tunnel.h>
#include <net/inet_dscp.h>

#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;

struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock);

static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;

struct kmem_cache *sctp_chunk_cachep __read_mostly;
struct kmem_cache *sctp_bucket_cachep __read_mostly;

long sysctl_sctp_mem[3];
int sysctl_sctp_rmem[3];
int sysctl_sctp_wmem[3];
/* Private helper to extract IPv4 addresses and stash them in
 * the protocol structure.
 */
static void sctp_v4_copy_addrlist(struct list_head *addrlist,
				  struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct sctp_sockaddr_entry *addr;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		return;
	}

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		/* Add the address to the local list. */
		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			addr->valid = 1;
			INIT_LIST_HEAD(&addr->list);
			list_add_tail(&addr->list, addrlist);
		}
	}

	rcu_read_unlock();
}
/* Extract our IP addresses from the system and stash them in the
 * protocol structure.
 */
static void sctp_get_local_addr_list(struct net *net)
{
	struct net_device *dev;
	struct list_head *pos;
	struct sctp_af *af;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		list_for_each(pos, &sctp_address_families) {
			af = list_entry(pos, struct sctp_af, list);
			af->copy_addrlist(&net->sctp.local_addr_list, dev);
		}
	}
	rcu_read_unlock();
}

/* Free the existing local addresses. */
static void sctp_free_local_addr_list(struct net *net)
{
	struct sctp_sockaddr_entry *addr;
	struct list_head *pos, *temp;

	list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
		list_del(pos);
		kfree(addr);
	}
}
/* Copy the local addresses which are valid for 'scope' into 'bp'. */
int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
			      enum sctp_scope scope, gfp_t gfp, int copy_flags)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr laddr;
	int error = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;
		if (!sctp_in_scope(net, &addr->a, scope))
			continue;

		/* Now that the address is in scope, check to see if
		 * the address type is really supported by the local
		 * sock as well as the remote peer.
		 */
		if (addr->a.sa.sa_family == AF_INET &&
		    (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
		     !(copy_flags & SCTP_ADDR4_PEERSUPP)))
			continue;
		if (addr->a.sa.sa_family == AF_INET6 &&
		    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
		     !(copy_flags & SCTP_ADDR6_PEERSUPP)))
			continue;

		laddr = addr->a;
		/* also works for setting ipv6 address port */
		laddr.v4.sin_port = htons(bp->port);
		if (sctp_bind_addr_state(bp, &laddr) != -1)
			continue;

		error = sctp_add_bind_addr(bp, &addr->a, sizeof(addr->a),
					   SCTP_ADDR_SRC, GFP_ATOMIC);
		if (error)
			break;
	}
	rcu_read_unlock();

	return error;
}
/* Copy over any ip options */
static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk)
{
	struct inet_sock *newinet, *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt, *newopt = NULL;

	newinet = inet_sk(newsk);

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt) {
		newopt = sock_kmalloc(newsk, sizeof(*inet_opt) +
				      inet_opt->opt.optlen, GFP_ATOMIC);
		if (newopt)
			memcpy(newopt, inet_opt, sizeof(*inet_opt) +
			       inet_opt->opt.optlen);
		else
			pr_err("%s: Failed to copy ip options\n", __func__);
	}
	RCU_INIT_POINTER(newinet->inet_opt, newopt);
	rcu_read_unlock();
}

/* Account for the IP options */
static int sctp_v4_ip_options_len(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options_rcu *inet_opt;
	int len = 0;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt)
		len = inet_opt->opt.optlen;

	rcu_read_unlock();
	return len;
}
/* Initialize a sctp_addr from an incoming skb. */
static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
			     int is_saddr)
{
	/* Always called on head skb, so this is safe */
	struct sctphdr *sh = sctp_hdr(skb);
	struct sockaddr_in *sa = &addr->v4;

	addr->v4.sin_family = AF_INET;

	if (is_saddr) {
		sa->sin_port = sh->source;
		sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
	} else {
		sa->sin_port = sh->dest;
		sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
	}
	memset(sa->sin_zero, 0, sizeof(sa->sin_zero));
}
/* Initialize an sctp_addr from a socket. */
static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = 0;
	addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}

/* Initialize sk->sk_rcv_saddr from sctp_addr. */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
}

/* Initialize sk->sk_daddr from sctp_addr. */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
}

/* Initialize a sctp_addr from an address parameter. */
static bool sctp_v4_from_addr_param(union sctp_addr *addr,
				    union sctp_addr_param *param,
				    __be16 port, int iif)
{
	if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
		return false;

	addr->v4.sin_family = AF_INET;
	addr->v4.sin_port = port;
	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));

	return true;
}

/* Initialize an address parameter from a sctp_addr and return the length
 * of the address parameter.
 */
static int sctp_v4_to_addr_param(const union sctp_addr *addr,
				 union sctp_addr_param *param)
{
	int length = sizeof(struct sctp_ipv4addr_param);

	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
	param->v4.param_hdr.length = htons(length);
	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;

	return length;
}

/* Initialize a sctp_addr from a dst_entry. */
static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
			      __be16 port)
{
	saddr->v4.sin_family = AF_INET;
	saddr->v4.sin_port = port;
	saddr->v4.sin_addr.s_addr = fl4->saddr;
	memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
}

/* Compare two addresses exactly. */
static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
			    const union sctp_addr *addr2)
{
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (addr1->v4.sin_port != addr2->v4.sin_port)
		return 0;
	if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr)
		return 0;

	return 1;
}

/* Initialize addr struct to INADDR_ANY. */
static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
{
	addr->v4.sin_family = AF_INET;
	addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
	addr->v4.sin_port = port;
	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}

/* Is this a wildcard address? */
static int sctp_v4_is_any(const union sctp_addr *addr)
{
	return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
}
/* This function checks if the address is a valid address to be used for
 * SCTP binding.
 *
 * Output:
 * Return 0 - If the address is a non-unicast or an illegal address.
 * Return 1 - If the address is a unicast.
 */
static int sctp_v4_addr_valid(union sctp_addr *addr,
			      struct sctp_sock *sp,
			      const struct sk_buff *skb)
{
	/* IPv4 addresses not allowed */
	if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	/* Is this a non-unicast address or an unusable SCTP address? */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr))
		return 0;

	/* Is this a broadcast address? */
	if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
		return 0;

	return 1;
}
/* Should this be available for binding? */
static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
{
	struct sock *sk = &sp->inet.sk;
	struct net *net = sock_net(sk);
	int tb_id = RT_TABLE_LOCAL;
	int ret;

	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ?: tb_id;
	ret = inet_addr_type_table(net, addr->v4.sin_addr.s_addr, tb_id);
	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
	    ret != RTN_LOCAL &&
	    !inet_test_bit(FREEBIND, sk) &&
	    !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind))
		return 0;

	if (ipv6_only_sock(sctp_opt2sk(sp)))
		return 0;

	return 1;
}
/* Checking the loopback, private and other address scopes as defined in
 * RFC 1918.  The IPv4 scoping is based on the draft for SCTP IPv4
 * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
 *
 * Level 0 - unusable SCTP addresses
 * Level 1 - loopback address
 * Level 2 - link-local addresses
 * Level 3 - private addresses.
 * Level 4 - global addresses
 * For INIT and INIT-ACK address list, let L be the level of
 * requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
 *
 * IPv4 scoping can be controlled through sysctl option
 * net.sctp.addr_scope_policy
 */
static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
{
	enum sctp_scope retval;

	/* Check for unusable SCTP addresses. */
	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_UNUSABLE;
	} else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LOOPBACK;
	} else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_LINK;
	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
		retval = SCTP_SCOPE_PRIVATE;
	} else {
		retval = SCTP_SCOPE_GLOBAL;
	}

	return retval;
}
/* Returns a valid dst cache entry for the given source and destination ip
 * addresses. If an association is passed, tries to get a dst entry with a
 * source address that matches an address in the bind address list.
 */
static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
			    struct flowi *fl, struct sock *sk)
{
	struct sctp_association *asoc = t->asoc;
	struct rtable *rt;
	struct flowi _fl;
	struct flowi4 *fl4 = &_fl.u.ip4;
	struct sctp_bind_addr *bp;
	struct sctp_sockaddr_entry *laddr;
	struct dst_entry *dst = NULL;
	union sctp_addr *daddr = &t->ipaddr;
	union sctp_addr dst_saddr;
	u8 tos = READ_ONCE(inet_sk(sk)->tos);

	if (t->dscp & SCTP_DSCP_SET_MASK)
		tos = t->dscp & SCTP_DSCP_VAL_MASK;
	memset(&_fl, 0x0, sizeof(_fl));
	fl4->daddr = daddr->v4.sin_addr.s_addr;
	fl4->fl4_dport = daddr->v4.sin_port;
	fl4->flowi4_proto = IPPROTO_SCTP;
	if (asoc) {
		fl4->flowi4_tos = tos & INET_DSCP_MASK;
		fl4->flowi4_scope = ip_sock_rt_scope(asoc->base.sk);
		fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
		fl4->fl4_sport = htons(asoc->base.bind_addr.port);
	}
	if (saddr) {
		fl4->saddr = saddr->v4.sin_addr.s_addr;
		if (!fl4->fl4_sport)
			fl4->fl4_sport = saddr->v4.sin_port;
	}

	pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
		 &fl4->saddr);

	rt = ip_route_output_key(sock_net(sk), fl4);
	if (!IS_ERR(rt)) {
		dst = &rt->dst;
		t->dst = dst;
		memcpy(fl, &_fl, sizeof(_fl));
	}

	/* If there is no association or if a source address is passed, no
	 * more validation is required.
	 */
	if (!asoc || saddr)
		goto out;

	bp = &asoc->base.bind_addr;

	if (dst) {
		/* Walk through the bind address list and look for a bind
		 * address that matches the source address of the returned dst.
		 */
		sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
		rcu_read_lock();
		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
			if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
			    (laddr->state != SCTP_ADDR_SRC &&
			     !asoc->src_out_of_asoc_ok))
				continue;
			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
				goto out_unlock;
		}
		rcu_read_unlock();

		/* None of the bound addresses match the source address of the
		 * dst. So release it.
		 */
		dst_release(dst);
		dst = NULL;
	}

	/* Walk through the bind address list and try to get a dst that
	 * matches a bind address as the source address.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
		struct net_device *odev;

		if (!laddr->valid)
			continue;
		if (laddr->state != SCTP_ADDR_SRC ||
		    AF_INET != laddr->a.sa.sa_family)
			continue;

		fl4->fl4_sport = laddr->a.v4.sin_port;
		flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if,
				     daddr->v4.sin_addr.s_addr,
				     laddr->a.v4.sin_addr.s_addr);

		rt = ip_route_output_key(sock_net(sk), fl4);
		if (IS_ERR(rt))
			continue;

		/* Ensure the src address belongs to the output
		 * interface.
		 */
		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
				     false);
		if (!odev || odev->ifindex != fl4->flowi4_oif) {
			if (!dst) {
				dst = &rt->dst;
				t->dst = dst;
				memcpy(fl, &_fl, sizeof(_fl));
			} else {
				dst_release(&rt->dst);
			}
			continue;
		}

		dst_release(dst);
		dst = &rt->dst;
		t->dst = dst;
		memcpy(fl, &_fl, sizeof(_fl));
		break;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (dst) {
		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
			 &fl->u.ip4.daddr, &fl->u.ip4.saddr);
	} else {
		t->dst = NULL;
		pr_debug("no route\n");
	}
}
/* For v4, the source address is cached in the route entry (dst). So no need
 * to cache it separately and hence this is an empty routine.
 */
static void sctp_v4_get_saddr(struct sctp_sock *sk,
			      struct sctp_transport *t,
			      struct flowi *fl)
{
	union sctp_addr *saddr = &t->saddr;
	struct rtable *rt = dst_rtable(t->dst);

	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
	}
}

/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
	return inet_iif(skb);
}
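/* Return the slave device index this skb arrived through (L3 master
 * device case), or 0 if there is none.
 */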
static int sctp_v4_skb_sdif(const struct sk_buff *skb)
{
	return inet_sdif(skb);
}
/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v4_is_ce(const struct sk_buff *skb)
{
	return INET_ECN_is_ce(ip_hdr(skb)->tos);
}

/* Create and initialize a new sk for the socket returned by accept(). */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
					     struct sctp_association *asoc,
					     bool kern)
{
	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
				      sk->sk_prot, kern);
	struct inet_sock *newinet;

	if (!newsk)
		goto out;

	sock_init_data(NULL, newsk);

	sctp_copy_sock(newsk, sk, asoc);
	sock_reset_flag(newsk, SOCK_ZAPPED);

	sctp_v4_copy_ip_options(sk, newsk);

	newinet = inet_sk(newsk);

	newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;

	if (newsk->sk_prot->init(newsk)) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	return newsk;
}

static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
	/* No address mapping for V4 sockets */
	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
	return sizeof(struct sockaddr_in);
}

/* Dump the v4 addr to the seq file. */
static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
	seq_printf(seq, "%pI4 ", &addr->v4.sin_addr);
}
static void sctp_v4_ecn_capable(struct sock *sk)
{
	INET_ECN_xmit(sk);
}
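/* Timer handler for the address wait queue: walk the queued address
 * events, issue ASCONFs for every auto-asconf socket, and free the
 * processed entries.
 */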
static void sctp_addr_wq_timeout_handler(struct timer_list *t)
{
	struct net *net = from_timer(net, t, sctp.addr_wq_timer);
	struct sctp_sockaddr_entry *addrw, *temp;
	struct sctp_sock *sp;

	spin_lock_bh(&net->sctp.addr_wq_lock);

	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
			 "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
			 addrw->state, addrw);

#if IS_ENABLED(CONFIG_IPV6)
		/* Now we send an ASCONF for each association */
		/* Note: we currently don't handle link-local IPv6 addresses */
		if (addrw->a.sa.sa_family == AF_INET6) {
			struct in6_addr *in6;

			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
			    IPV6_ADDR_LINKLOCAL)
				goto free_next;

			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
			if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
			    addrw->state == SCTP_ADDR_NEW) {
				unsigned long timeo_val;

				pr_debug("%s: this is on DAD, trying %d sec "
					 "later\n", __func__,
					 SCTP_ADDRESS_TICK_DELAY);

				timeo_val = jiffies;
				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
				mod_timer(&net->sctp.addr_wq_timer, timeo_val);
				break;
			}
		}
#endif
		list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
			struct sock *sk;

			sk = sctp_opt2sk(sp);
			/* ignore bound-specific endpoints */
			if (!sctp_is_ep_boundall(sk))
				continue;
			bh_lock_sock(sk);
			if (sctp_asconf_mgmt(sp, addrw) < 0)
				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
			bh_unlock_sock(sk);
		}
#if IS_ENABLED(CONFIG_IPV6)
free_next:
#endif
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}
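/* Stop the address wait queue timer and free any entries still queued. */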
static void sctp_free_addr_wq(struct net *net)
{
	struct sctp_sockaddr_entry *addrw;
	struct sctp_sockaddr_entry *temp;

	spin_lock_bh(&net->sctp.addr_wq_lock);
	del_timer(&net->sctp.addr_wq_timer);
	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
		list_del(&addrw->list);
		kfree(addrw);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}
/* lookup the entry for the same address in the addr_waitq
 * sctp_addr_wq MUST be locked
 */
static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
					struct sctp_sockaddr_entry *addr)
{
	struct sctp_sockaddr_entry *addrw;

	list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
			continue;
		if (addrw->a.sa.sa_family == AF_INET) {
			if (addrw->a.v4.sin_addr.s_addr ==
			    addr->a.v4.sin_addr.s_addr)
				return addrw;
		} else if (addrw->a.sa.sa_family == AF_INET6) {
			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
					    &addr->a.v6.sin6_addr))
				return addrw;
		}
	}
	return NULL;
}
void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
{
	struct sctp_sockaddr_entry *addrw;
	unsigned long timeo_val;

	/* first, we check if an opposite message already exists in the queue.
	 * If we find such a message, it is removed.
	 * This operation is a bit crude, but DHCP clients tend to re-attach
	 * the new address after a couple of additions and deletions of that
	 * address.
	 */

	spin_lock_bh(&net->sctp.addr_wq_lock);

	/* Offsets existing events in addr_wq */
	addrw = sctp_addr_wq_lookup(net, addr);
	if (addrw) {
		if (addrw->state != cmd) {
			pr_debug("%s: offsets existing entry for %d, addr:%pISc "
				 "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
				 &net->sctp.addr_waitq);

			list_del(&addrw->list);
			kfree(addrw);
		}
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}

	/* OK, we have to add the new address to the wait queue */
	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
	if (addrw == NULL) {
		spin_unlock_bh(&net->sctp.addr_wq_lock);
		return;
	}
	addrw->state = cmd;
	list_add_tail(&addrw->list, &net->sctp.addr_waitq);

	pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
		 __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);

	if (!timer_pending(&net->sctp.addr_wq_timer)) {
		timeo_val = jiffies;
		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
		mod_timer(&net->sctp.addr_wq_timer, timeo_val);
	}
	spin_unlock_bh(&net->sctp.addr_wq_lock);
}
/* Event handler for inet address addition/deletion events.
 * The sctp_local_addr_list needs to be protected by a spin lock since
 * multiple notifiers (say IPv4 and IPv6) may be running at the same
 * time and thus corrupt the list.
 * The reader side is protected with RCU.
 */
static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
			       void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct sctp_sockaddr_entry *addr = NULL;
	struct sctp_sockaddr_entry *temp;
	struct net *net = dev_net(ifa->ifa_dev->dev);
	int found = 0;

	switch (ev) {
	case NETDEV_UP:
		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
		if (addr) {
			addr->a.v4.sin_family = AF_INET;
			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
			addr->valid = 1;
			spin_lock_bh(&net->sctp.local_addr_lock);
			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
			spin_unlock_bh(&net->sctp.local_addr_lock);
		}
		break;
	case NETDEV_DOWN:
		spin_lock_bh(&net->sctp.local_addr_lock);
		list_for_each_entry_safe(addr, temp,
					&net->sctp.local_addr_list, list) {
			if (addr->a.sa.sa_family == AF_INET &&
			    addr->a.v4.sin_addr.s_addr ==
			    ifa->ifa_local) {
				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
				found = 1;
				addr->valid = 0;
				list_del_rcu(&addr->list);
				break;
			}
		}
		spin_unlock_bh(&net->sctp.local_addr_lock);
		if (found)
			kfree_rcu(addr, rcu);
		break;
	}

	return NOTIFY_DONE;
}
/*
 * Initialize the control inode/socket with a control endpoint data
 * structure.  This endpoint is reserved exclusively for the OOTB processing.
 */
static int sctp_ctl_sock_init(struct net *net)
{
	int err;
	sa_family_t family = PF_INET;

	if (sctp_get_pf_specific(PF_INET6))
		family = PF_INET6;

	err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
				   SOCK_SEQPACKET, IPPROTO_SCTP, net);

	/* If IPv6 socket could not be created, try the IPv4 socket */
	if (err < 0 && family == PF_INET6)
		err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
					   SOCK_SEQPACKET, IPPROTO_SCTP,
					   net);

	if (err < 0) {
		pr_err("Failed to create the SCTP control socket\n");
		return err;
	}
	return 0;
}
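/* Receive callback for the UDP encapsulation sockets: record the UDP
 * source port, skip past the UDP header and hand the packet to sctp_rcv().
 */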
static int sctp_udp_rcv(struct sock *sk, struct sk_buff *skb)
{
	SCTP_INPUT_CB(skb)->encap_port = udp_hdr(skb)->source;

	skb_set_transport_header(skb, sizeof(struct udphdr));
	sctp_rcv(skb);
	return 0;
}
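/* Create the IPv4 (and, when IPv6 is enabled, IPv6) UDP tunneling sockets
 * used for SCTP-over-UDP encapsulation, listening on net->sctp.udp_port.
 */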
int sctp_udp_sock_start(struct net *net)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct udp_port_cfg udp_conf = {0};
	struct socket *sock;
	int err;

	udp_conf.family = AF_INET;
	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	udp_conf.local_udp_port = htons(net->sctp.udp_port);
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err) {
		pr_err("Failed to create the SCTP UDP tunneling v4 sock\n");
		return err;
	}

	tuncfg.encap_type = 1;
	tuncfg.encap_rcv = sctp_udp_rcv;
	tuncfg.encap_err_lookup = sctp_udp_v4_err;
	setup_udp_tunnel_sock(net, sock, &tuncfg);
	net->sctp.udp4_sock = sock->sk;

#if IS_ENABLED(CONFIG_IPV6)
	memset(&udp_conf, 0, sizeof(udp_conf));

	udp_conf.family = AF_INET6;
	udp_conf.local_ip6 = in6addr_any;
	udp_conf.local_udp_port = htons(net->sctp.udp_port);
	udp_conf.use_udp6_rx_checksums = true;
	udp_conf.ipv6_v6only = true;
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err) {
		pr_err("Failed to create the SCTP UDP tunneling v6 sock\n");
		udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
		net->sctp.udp4_sock = NULL;
		return err;
	}

	tuncfg.encap_type = 1;
	tuncfg.encap_rcv = sctp_udp_rcv;
	tuncfg.encap_err_lookup = sctp_udp_v6_err;
	setup_udp_tunnel_sock(net, sock, &tuncfg);
	net->sctp.udp6_sock = sock->sk;
#endif

	return 0;
}
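/* Release the UDP tunneling sockets, if they were created. */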
void sctp_udp_sock_stop(struct net *net)
{
	if (net->sctp.udp4_sock) {
		udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
		net->sctp.udp4_sock = NULL;
	}
	if (net->sctp.udp6_sock) {
		udp_tunnel_sock_release(net->sctp.udp6_sock->sk_socket);
		net->sctp.udp6_sock = NULL;
	}
}
/* Register address family specific functions. */
int sctp_register_af(struct sctp_af *af)
{
	switch (af->sa_family) {
	case AF_INET:
		if (sctp_af_v4_specific)
			return 0;
		sctp_af_v4_specific = af;
		break;
	case AF_INET6:
		if (sctp_af_v6_specific)
			return 0;
		sctp_af_v6_specific = af;
		break;
	default:
		return 0;
	}

	INIT_LIST_HEAD(&af->list);
	list_add_tail(&af->list, &sctp_address_families);
	return 1;
}

/* Get the table of functions for manipulating a particular address
 * family.
 */
struct sctp_af *sctp_get_af_specific(sa_family_t family)
{
	switch (family) {
	case AF_INET:
		return sctp_af_v4_specific;
	case AF_INET6:
		return sctp_af_v6_specific;
	default:
		return NULL;
	}
}
/* Common code to initialize an AF_INET msg_name. */
static void sctp_inet_msgname(char *msgname, int *addr_len)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *)msgname;
	*addr_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}
/* Copy the peer's primary address as the msg_name. */
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
				    int *addr_len)
{
	struct sockaddr_in *sin, *sinfrom;

	if (msgname) {
		struct sctp_association *asoc;

		asoc = event->asoc;
		sctp_inet_msgname(msgname, addr_len);
		sin = (struct sockaddr_in *)msgname;
		sinfrom = &asoc->peer.primary_addr.v4;
		sin->sin_port = htons(asoc->peer.port);
		sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
	}
}
/* Initialize and copy out a msgname from an inbound skb. */
static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
{
	if (msgname) {
		struct sctphdr *sh = sctp_hdr(skb);
		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;

		sctp_inet_msgname(msgname, len);
		sin->sin_port = sh->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
	}
}

/* Do we support this AF? */
static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
{
	/* PF_INET only supports AF_INET addresses. */
	return AF_INET == family;
}

/* Address matching with wildcards allowed. */
static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
			      const union sctp_addr *addr2,
			      struct sctp_sock *opt)
{
	/* PF_INET only supports AF_INET addresses. */
	if (addr1->sa.sa_family != addr2->sa.sa_family)
		return 0;
	if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
	    htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
		return 1;
	if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
		return 1;

	return 0;
}

/* Verify that provided sockaddr looks bindable.  Common verification has
 * already been taken care of.
 */
static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return sctp_v4_available(addr, opt);
}

/* Verify that sockaddr looks sendable.  Common verification has already
 * been taken care of.
 */
static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return 1;
}

/* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks.  Returns number of addresses supported.
 */
static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
				     __be16 *types)
{
	types[0] = SCTP_PARAM_IPV4_ADDRESS;
	return 1;
}

/* Wrapper routine that calls the ip transmit routine. */
static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
{
	struct dst_entry *dst = dst_clone(t->dst);
	struct flowi4 *fl4 = &t->fl.u.ip4;
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	__u8 dscp = READ_ONCE(inet->tos);
	__be16 df = 0;

	pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb,
		 skb->len, &fl4->saddr, &fl4->daddr);

	if (t->dscp & SCTP_DSCP_SET_MASK)
		dscp = t->dscp & SCTP_DSCP_VAL_MASK;

	inet->pmtudisc = t->param_flags & SPP_PMTUD_ENABLE ? IP_PMTUDISC_DO
							   : IP_PMTUDISC_DONT;
	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);

	if (!t->encap_port || !sctp_sk(sk)->udp_port) {
		skb_dst_set(skb, dst);
		return __ip_queue_xmit(sk, skb, &t->fl, dscp);
	}

	if (skb_is_gso(skb))
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;

	if (ip_dont_fragment(sk, dst) && !skb->ignore_df)
		df = htons(IP_DF);

	skb->encapsulation = 1;
	skb_reset_inner_mac_header(skb);
	skb_reset_inner_transport_header(skb);
	skb_set_inner_ipproto(skb, IPPROTO_SCTP);
	udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
			    fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
			    sctp_sk(sk)->udp_port, t->encap_port, false, false);
	return 0;
}
static struct sctp_af sctp_af_inet;

static struct sctp_pf sctp_pf_inet = {
	.event_msgname = sctp_inet_event_msgname,
	.skb_msgname = sctp_inet_skb_msgname,
	.af_supported = sctp_inet_af_supported,
	.cmp_addr = sctp_inet_cmp_addr,
	.bind_verify = sctp_inet_bind_verify,
	.send_verify = sctp_inet_send_verify,
	.supported_addrs = sctp_inet_supported_addrs,
	.create_accept_sk = sctp_v4_create_accept_sk,
	.addr_to_user = sctp_v4_addr_to_user,
	.to_sk_saddr = sctp_v4_to_sk_saddr,
	.to_sk_daddr = sctp_v4_to_sk_daddr,
	.copy_ip_options = sctp_v4_copy_ip_options,
	.af = &sctp_af_inet
};

/* Notifier for inetaddr addition/deletion events. */
static struct notifier_block sctp_inetaddr_notifier = {
	.notifier_call = sctp_inetaddr_event,
};

/* Socket operations. */
static const struct proto_ops inet_seqpacket_ops = {
	.family = PF_INET,
	.owner = THIS_MODULE,
	.release = inet_release,	/* Needs to be wrapped... */
	.bind = inet_bind,
	.connect = sctp_inet_connect,
	.socketpair = sock_no_socketpair,
	.accept = inet_accept,
	.getname = inet_getname,	/* Semantics are different. */
	.poll = sctp_poll,
	.ioctl = inet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sctp_inet_listen,
	.shutdown = inet_shutdown,	/* Looks harmless. */
	.setsockopt = sock_common_setsockopt,	/* IP_SOL IP_OPTION is a problem */
	.getsockopt = sock_common_getsockopt,
	.sendmsg = inet_sendmsg,
	.recvmsg = inet_recvmsg,
	.mmap = sock_no_mmap,
};

/* Registration with AF_INET family. */
static struct inet_protosw sctp_seqpacket_protosw = {
	.type = SOCK_SEQPACKET,
	.protocol = IPPROTO_SCTP,
	.prot = &sctp_prot,
	.ops = &inet_seqpacket_ops,
	.flags = SCTP_PROTOSW_FLAG
};

static struct inet_protosw sctp_stream_protosw = {
	.type = SOCK_STREAM,
	.protocol = IPPROTO_SCTP,
	.prot = &sctp_prot,
	.ops = &inet_seqpacket_ops,
	.flags = SCTP_PROTOSW_FLAG
};
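/* Receive path for plain (non-UDP-encapsulated) SCTP packets. */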
static int sctp4_rcv(struct sk_buff *skb)
{
	SCTP_INPUT_CB(skb)->encap_port = 0;
	return sctp_rcv(skb);
}
/* Register with IP layer. */
static const struct net_protocol sctp_protocol = {
	.handler = sctp4_rcv,
	.err_handler = sctp_v4_err,
	.no_policy = 1,
	.icmp_strict_tag_validation = 1,
};

/* IPv4 address related functions. */
static struct sctp_af sctp_af_inet = {
	.sa_family = AF_INET,
	.sctp_xmit = sctp_v4_xmit,
	.setsockopt = ip_setsockopt,
	.getsockopt = ip_getsockopt,
	.get_dst = sctp_v4_get_dst,
	.get_saddr = sctp_v4_get_saddr,
	.copy_addrlist = sctp_v4_copy_addrlist,
	.from_skb = sctp_v4_from_skb,
	.from_sk = sctp_v4_from_sk,
	.from_addr_param = sctp_v4_from_addr_param,
	.to_addr_param = sctp_v4_to_addr_param,
	.cmp_addr = sctp_v4_cmp_addr,
	.addr_valid = sctp_v4_addr_valid,
	.inaddr_any = sctp_v4_inaddr_any,
	.is_any = sctp_v4_is_any,
	.available = sctp_v4_available,
	.scope = sctp_v4_scope,
	.skb_iif = sctp_v4_skb_iif,
	.skb_sdif = sctp_v4_skb_sdif,
	.is_ce = sctp_v4_is_ce,
	.seq_dump_addr = sctp_v4_seq_dump_addr,
	.ecn_capable = sctp_v4_ecn_capable,
	.net_header_len = sizeof(struct iphdr),
	.sockaddr_len = sizeof(struct sockaddr_in),
	.ip_options_len = sctp_v4_ip_options_len,
};

struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
{
	switch (family) {
	case PF_INET:
		return sctp_pf_inet_specific;
	case PF_INET6:
		return sctp_pf_inet6_specific;
	default:
		return NULL;
	}
}

/* Register the PF specific function table. */
int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
{
	switch (family) {
	case PF_INET:
		if (sctp_pf_inet_specific)
			return 0;
		sctp_pf_inet_specific = pf;
		break;
	case PF_INET6:
		if (sctp_pf_inet6_specific)
			return 0;
		sctp_pf_inet6_specific = pf;
		break;
	default:
		return 0;
	}
	return 1;
}
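/* Allocate (and, below, free) the per-cpu SCTP MIB counters for a netns. */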
static inline int init_sctp_mibs(struct net *net)
{
	net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib);
	if (!net->sctp.sctp_statistics)
		return -ENOMEM;

	return 0;
}

static inline void cleanup_sctp_mibs(struct net *net)
{
	free_percpu(net->sctp.sctp_statistics);
}
static void sctp_v4_pf_init(void)
{
	/* Initialize the SCTP specific PF functions. */
	sctp_register_pf(&sctp_pf_inet, PF_INET);
	sctp_register_af(&sctp_af_inet);
}

static void sctp_v4_pf_exit(void)
{
	list_del(&sctp_af_inet.list);
}

static int sctp_v4_protosw_init(void)
{
	int rc;

	rc = proto_register(&sctp_prot, 1);
	if (rc)
		return rc;

	/* Register SCTP(UDP and TCP style) with socket layer. */
	inet_register_protosw(&sctp_seqpacket_protosw);
	inet_register_protosw(&sctp_stream_protosw);

	return 0;
}
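/* Undo the socket-layer registrations and the core proto registration. */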
static void sctp_v4_protosw_exit(void)
{
	inet_unregister_protosw(&sctp_stream_protosw);
	inet_unregister_protosw(&sctp_seqpacket_protosw);
	proto_unregister(&sctp_prot);
}

static int sctp_v4_add_protocol(void)
{
	/* Register notifier for inet address additions/deletions. */
	register_inetaddr_notifier(&sctp_inetaddr_notifier);

	/* Register SCTP with inet layer. */
	if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
		return -EAGAIN;

	return 0;
}

static void sctp_v4_del_protocol(void)
{
	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
}
static int __net_init sctp_defaults_init(struct net *net)
{
	int status;

	/*
	 * 14. Suggested SCTP Protocol Parameter Values
	 */
	/* The following protocol parameters are RECOMMENDED: */

	/* RTO.Initial - 3 seconds */
	net->sctp.rto_initial = SCTP_RTO_INITIAL;
	/* RTO.Min - 1 second */
	net->sctp.rto_min = SCTP_RTO_MIN;
	/* RTO.Max - 60 seconds */
	net->sctp.rto_max = SCTP_RTO_MAX;
	/* RTO.Alpha - 1/8 */
	net->sctp.rto_alpha = SCTP_RTO_ALPHA;
	/* RTO.Beta - 1/4 */
	net->sctp.rto_beta = SCTP_RTO_BETA;

	/* Valid.Cookie.Life - 60 seconds */
	net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE;

	/* Whether Cookie Preservative is enabled (1) or not (0) */
	net->sctp.cookie_preserve_enable = 1;

	/* Default sctp sockets to use md5 as their hmac alg */
#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5)
	net->sctp.sctp_hmac_alg = "md5";
#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1)
	net->sctp.sctp_hmac_alg = "sha1";
#else
	net->sctp.sctp_hmac_alg = NULL;
#endif

	/* Max.Burst - 4 */
	net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST;

	/* Disable Primary Path Switchover by default */
	net->sctp.ps_retrans = SCTP_PS_RETRANS_MAX;

	/* Enable pf state by default */
	net->sctp.pf_enable = 1;

	/* Ignore pf exposure feature by default */
	net->sctp.pf_expose = SCTP_PF_EXPOSE_UNSET;

	/* Association.Max.Retrans - 10 attempts
	 * Path.Max.Retrans - 5 attempts (per destination address)
	 * Max.Init.Retransmits - 8 attempts
	 */
	net->sctp.max_retrans_association = 10;
	net->sctp.max_retrans_path = 5;
	net->sctp.max_retrans_init = 8;

	/* Sendbuffer growth - do per-socket accounting */
	net->sctp.sndbuf_policy = 0;

	/* Rcvbuffer growth - do per-socket accounting */
	net->sctp.rcvbuf_policy = 0;

	/* HB.interval - 30 seconds */
	net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;

	/* delayed SACK timeout */
	net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK;

	/* Disable ADDIP by default. */
	net->sctp.addip_enable = 0;
	net->sctp.addip_noauth = 0;
	net->sctp.default_auto_asconf = 0;

	/* Enable PR-SCTP by default. */
	net->sctp.prsctp_enable = 1;

	/* Disable RECONF by default. */
	net->sctp.reconf_enable = 0;

	/* Disable AUTH by default. */
	net->sctp.auth_enable = 0;

	/* Enable ECN by default. */
	net->sctp.ecn_enable = 1;

	/* Set UDP tunneling listening port to 0 by default */
	net->sctp.udp_port = 0;

	/* Set remote encap port to 0 by default */
	net->sctp.encap_port = 0;

	/* Set SCOPE policy to enabled */
	net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE;

	/* Set the default rwnd update threshold */
	net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;

	/* Initialize maximum autoclose timeout. */
	net->sctp.max_autoclose = INT_MAX / HZ;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->sctp.l3mdev_accept = 1;
#endif

	status = sctp_sysctl_net_register(net);
	if (status)
		goto err_sysctl_register;

	/* Allocate and initialise sctp mibs. */
	status = init_sctp_mibs(net);
	if (status)
		goto err_init_mibs;

#ifdef CONFIG_PROC_FS
	/* Initialize proc fs directory. */
	status = sctp_proc_init(net);
	if (status)
		goto err_init_proc;
#endif

	sctp_dbg_objcnt_init(net);

	/* Initialize the local address list. */
	INIT_LIST_HEAD(&net->sctp.local_addr_list);
	spin_lock_init(&net->sctp.local_addr_lock);
	sctp_get_local_addr_list(net);

	/* Initialize the address event list */
	INIT_LIST_HEAD(&net->sctp.addr_waitq);
	INIT_LIST_HEAD(&net->sctp.auto_asconf_splist);
	spin_lock_init(&net->sctp.addr_wq_lock);
	net->sctp.addr_wq_timer.expires = 0;
	timer_setup(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, 0);

	return 0;

#ifdef CONFIG_PROC_FS
err_init_proc:
	cleanup_sctp_mibs(net);
#endif
err_init_mibs:
	sctp_sysctl_net_unregister(net);
err_sysctl_register:
	return status;
}
static void __net_exit sctp_defaults_exit(struct net *net)
{
	/* Free the local address list */
	sctp_free_addr_wq(net);
	sctp_free_local_addr_list(net);

#ifdef CONFIG_PROC_FS
	remove_proc_subtree("sctp", net->proc_net);
	net->sctp.proc_net_sctp = NULL;
#endif
	cleanup_sctp_mibs(net);
	sctp_sysctl_net_unregister(net);
}

static struct pernet_operations sctp_defaults_ops = {
	.init = sctp_defaults_init,
	.exit = sctp_defaults_exit,
};

static int __net_init sctp_ctrlsock_init(struct net *net)
{
	int status;

	/* Initialize the control inode/socket for handling OOTB packets. */
	status = sctp_ctl_sock_init(net);
	if (status)
		pr_err("Failed to initialize the SCTP control sock\n");

	return status;
}

static void __net_exit sctp_ctrlsock_exit(struct net *net)
{
	/* Free the control endpoint. */
	inet_ctl_sock_destroy(net->sctp.ctl_sock);
}

static struct pernet_operations sctp_ctrlsock_ops = {
	.init = sctp_ctrlsock_init,
	.exit = sctp_ctrlsock_exit,
};
/* Initialize the universe into something sensible. */
static __init int sctp_init(void)
{
	unsigned long nr_pages = totalram_pages();
	unsigned long limit;
	unsigned long goal;
	int max_entry_order;
	int num_entries;
	int max_share;
	int status;
	int order;
	int i;

	sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));

	/* Allocate bind_bucket and chunk caches. */
	status = -ENOBUFS;

	sctp_bucket_cachep = KMEM_CACHE(sctp_bind_bucket, SLAB_HWCACHE_ALIGN);
	if (!sctp_bucket_cachep)
		goto out;

	sctp_chunk_cachep = KMEM_CACHE(sctp_chunk, SLAB_HWCACHE_ALIGN);
	if (!sctp_chunk_cachep)
		goto err_chunk_cachep;

	status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
	if (status)
		goto err_percpu_counter_init;

	/* Implementation specific variables. */

	/* Initialize default stream count setup information. */
	sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
	sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;

	/* Initialize handle used for association ids. */
	idr_init(&sctp_assocs_id);

	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_sctp_mem[0] = limit / 4 * 3;
	sysctl_sctp_mem[1] = limit;
	sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */
	sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
	sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);

	sysctl_sctp_wmem[0] = PAGE_SIZE;
	sysctl_sctp_wmem[1] = 16*1024;
	sysctl_sctp_wmem[2] = max(64*1024, max_share);

	/* Size and allocate the association hash table.
	 * The methodology is similar to that of the tcp hash tables,
	 * though not identical.  Start by getting a goal size.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (22 - PAGE_SHIFT);
	else
		goal = nr_pages >> (24 - PAGE_SHIFT);

	/* Then compute the page order for said goal */
	order = get_order(goal);

	/* Now compute the required page order for the maximum sized table we
	 * want to create
	 */
	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
				    sizeof(struct sctp_bind_hashbucket));

	/* Limit the page order by that maximum hash table size */
	order = min(order, max_entry_order);

	/* Allocate and initialize the endpoint hash table. */
	sctp_ep_hashsize = 64;
	sctp_ep_hashtable =
		kmalloc_array(64, sizeof(struct sctp_hashbucket), GFP_KERNEL);
	if (!sctp_ep_hashtable) {
		pr_err("Failed endpoint_hash alloc\n");
		status = -ENOMEM;
		goto err_ehash_alloc;
	}
	for (i = 0; i < sctp_ep_hashsize; i++) {
		rwlock_init(&sctp_ep_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
	}

	/* Allocate and initialize the SCTP port hash table.
	 * Note that order is initialized to start at the max sized
	 * table we want to support.  If we can't get that many pages,
	 * reduce the order and try again.
	 */
	do {
		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
			__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
	} while (!sctp_port_hashtable && --order > 0);

	if (!sctp_port_hashtable) {
		pr_err("Failed bind hash alloc\n");
		status = -ENOMEM;
		goto err_bhash_alloc;
	}

	/* Now compute the number of entries that will fit in the
	 * port hash space we allocated
	 */
	num_entries = (1UL << order) * PAGE_SIZE /
		      sizeof(struct sctp_bind_hashbucket);

	/* And finish by rounding it down to the nearest power of two.
	 * This wastes some memory of course, but it's needed because
	 * the hash function operates based on the assumption that
	 * the number of entries is a power of two.
	 */
	sctp_port_hashsize = rounddown_pow_of_two(num_entries);

	for (i = 0; i < sctp_port_hashsize; i++) {
		spin_lock_init(&sctp_port_hashtable[i].lock);
		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
	}

	status = sctp_transport_hashtable_init();
	if (status)
		goto err_thash_alloc;

	pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
		num_entries);

	sctp_sysctl_register();

	INIT_LIST_HEAD(&sctp_address_families);
	sctp_v4_pf_init();
	sctp_v6_pf_init();
	sctp_sched_ops_init();

	status = register_pernet_subsys(&sctp_defaults_ops);
	if (status)
		goto err_register_defaults;

	status = sctp_v4_protosw_init();
	if (status)
		goto err_protosw_init;

	status = sctp_v6_protosw_init();
	if (status)
		goto err_v6_protosw_init;

	status = register_pernet_subsys(&sctp_ctrlsock_ops);
	if (status)
		goto err_register_ctrlsock;

	status = sctp_v4_add_protocol();
	if (status)
		goto err_add_protocol;

	/* Register SCTP with inet6 layer. */
	status = sctp_v6_add_protocol();
	if (status)
		goto err_v6_add_protocol;

	if (sctp_offload_init() < 0)
		pr_crit("%s: Cannot add SCTP protocol offload\n", __func__);

out:
	return status;
err_v6_add_protocol:
	sctp_v4_del_protocol();
err_add_protocol:
	unregister_pernet_subsys(&sctp_ctrlsock_ops);
err_register_ctrlsock:
	sctp_v6_protosw_exit();
err_v6_protosw_init:
	sctp_v4_protosw_exit();
err_protosw_init:
	unregister_pernet_subsys(&sctp_defaults_ops);
err_register_defaults:
	sctp_v4_pf_exit();
	sctp_v6_pf_exit();
	sctp_sysctl_unregister();
	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
err_bhash_alloc:
	sctp_transport_hashtable_destroy();
err_thash_alloc:
	kfree(sctp_ep_hashtable);
err_ehash_alloc:
	percpu_counter_destroy(&sctp_sockets_allocated);
err_percpu_counter_init:
	kmem_cache_destroy(sctp_chunk_cachep);
err_chunk_cachep:
	kmem_cache_destroy(sctp_bucket_cachep);
	goto out;
}
/* Exit handler for the SCTP protocol. */
static __exit void sctp_exit(void)
{
	/* BUG.  This should probably do something useful like clean
	 * up all the remaining associations and all that memory.
	 */

	/* Unregister with inet6/inet layers. */
	sctp_v6_del_protocol();
	sctp_v4_del_protocol();

	unregister_pernet_subsys(&sctp_ctrlsock_ops);

	/* Free protosw registrations */
	sctp_v6_protosw_exit();
	sctp_v4_protosw_exit();

	unregister_pernet_subsys(&sctp_defaults_ops);

	/* Unregister with socket layer. */
	sctp_v6_pf_exit();
	sctp_v4_pf_exit();

	sctp_sysctl_unregister();

	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));
	kfree(sctp_ep_hashtable);
	sctp_transport_hashtable_destroy();

	percpu_counter_destroy(&sctp_sockets_allocated);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(sctp_chunk_cachep);
	kmem_cache_destroy(sctp_bucket_cachep);
}

module_init(sctp_init);
module_exit(sctp_exit);
/*
 * __stringify doesn't like enums, so use IPPROTO_SCTP's value (132) directly.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
MODULE_LICENSE("GPL");