gtp.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */
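
/* This driver implements the kernel data path for GTP tunnels. A
 * userspace control-plane daemon (for example one built on libgtpnl)
 * creates the gtp netdevice and manages PDP contexts over generic
 * netlink, while T-PDU encapsulation and decapsulation happen here.
 */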

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	union {
		struct in_addr	addr;
		struct in6_addr	addr6;
	} ms;
	union {
		struct in_addr	addr;
		struct in6_addr	addr6;
	} peer;

	struct sock		*sk;
	struct net_device	*dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};

/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;
	u8			sk_created;

	struct net_device	*dev;
	struct net		*net;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;

	u8			restart_count;
};

struct echo_info {
	u16			af;
	u8			gtp_version;

	union {
		struct in_addr	addr;
	} ms;
	union {
		struct in_addr	addr;
	} peer;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};

static void pdp_context_delete(struct pdp_ctx *pctx);
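
/* The 64-bit GTPv0 TID is hashed as its two 32-bit halves (in host
 * memory order), seeded with the random gtp_h_initval.
 */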
static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

static u32 ipv6_hashfn(const struct in6_addr *ip6)
{
	return jhash_2words((__force u32)ip6->s6_addr32[0],
			    (__force u32)ip6->s6_addr32[1], gtp_h_initval);
}

/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->af == family &&
		    pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->af == family &&
		    pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms.addr.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
 * [...] one IPv6 *prefix* and a PDN represented by an APN.
 *
 * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
 * according to the maximum prefix length for a global IPv6 address as
 * specified in the IPv6 Addressing Architecture, see RFC 4291.
 *
 * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
 * than those that start with binary 000 have a 64-bit interface ID field
 * (i.e., n + m = 64).
 */
static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
				const struct in6_addr *b)
{
	return a->s6_addr32[0] == b->s6_addr32[0] &&
	       a->s6_addr32[1] == b->s6_addr32[1];
}

static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
				     const struct in6_addr *ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET6 &&
		    ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
			return pdp;
	}

	return NULL;
}
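
/* Role semantics for the MS checks below: a device in the SGSN role
 * sees downlink traffic, so the mobile subscriber (MS) is the inner
 * destination address; in the default GGSN role the MS is the inner
 * source address.
 */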
static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms.addr.s_addr;
	else
		return iph->saddr == pctx->ms.addr.s_addr;
}

static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
			      unsigned int hdrlen, unsigned int role)
{
	struct ipv6hdr *ip6h;
	int ret;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
		return false;

	ip6h = (struct ipv6hdr *)(skb->data + hdrlen);

	if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
		return false;

	if (role == GTP_ROLE_SGSN) {
		ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
	} else {
		ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
	}

	return ret;
}

/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			 unsigned int hdrlen, unsigned int role,
			 __u16 inner_proto)
{
	switch (inner_proto) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	case ETH_P_IPV6:
		return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
	}
	return false;
}
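
/* Classify the encapsulated packet by peeking at the first octet of the
 * inner header: the top nibble is the IP version field, so 0x4x means
 * IPv4 and 0x6x means IPv6.
 */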
static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
			   __u16 *inner_proto)
{
	__u8 *ip_version, _ip_version;

	ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
					&_ip_version);
	if (!ip_version)
		return -1;

	switch (*ip_version & 0xf0) {
	case 0x40:
		*inner_proto = ETH_P_IP;
		break;
	case 0x60:
		*inner_proto = ETH_P_IPV6;
		break;
	default:
		return -1;
	}

	return 0;
}

static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
		  unsigned int hdrlen, unsigned int role, __u16 inner_proto)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
		pctx->dev->stats.rx_length_errors++;
		goto err;
	}

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	__netif_rx(skb);
	return 0;

err:
	pctx->dev->stats.rx_dropped++;
	return -1;
}
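
/* Route lookup helpers for the tunnel path: the flow is keyed on the
 * GTP socket's binding and protocol plus the given source and
 * destination addresses.
 */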
static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr, __be32 saddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= saddr;
	fl4->flowi4_tos		= ip_sock_rt_tos(sk);
	fl4->flowi4_scope	= ip_sock_rt_scope(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}

static struct rt6_info *ip6_route_output_gtp(struct net *net,
					     struct flowi6 *fl6,
					     const struct sock *sk,
					     const struct in6_addr *daddr,
					     struct in6_addr *saddr)
{
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_oif		= sk->sk_bound_dev_if;
	fl6->daddr		= *daddr;
	fl6->saddr		= *saddr;
	fl6->flowi6_proto	= sk->sk_protocol;

	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(dst))
		return ERR_PTR(-ENETUNREACH);

	return (struct rt6_info *)dst;
}

/* GSM TS 09.60. 7.3
 * In all Path Management messages:
 * - TID: is not used and shall be set to 0.
 * - Flow Label is not used and shall be set to 0
 * In signalling messages:
 * - number: this field is not yet used in signalling messages.
 *   It shall be set to 255 by the sender and shall be ignored
 *   by the receiver
 * Returns true if the echo req was correct, false otherwise.
 */
static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
{
	return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
		 gtp0->number != 0xff || gtp0->flow);
}
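
/* Note: "gtp0->flags ^ 0x1e" above is zero only when the flags octet is
 * exactly 0x1e, i.e. version 0 with PT=1 (GTP, not GTP') and the
 * remaining bits at their mandated values.
 */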
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	hdr->flags = 0x1e; /* v0, GTP-non-prime. */
	hdr->type = msg_type;
	/* GSM TS 09.60. 7.3 In all Path Management messages, Flow Label
	 * and TID are not used and shall be set to 0.
	 */
	hdr->flow = 0;
	hdr->tid = 0;
	hdr->number = 0xff;
	hdr->spare[0] = 0xff;
	hdr->spare[1] = 0xff;
	hdr->spare[2] = 0xff;

	len_pkt = sizeof(struct gtp0_packet);
	len_hdr = sizeof(struct gtp0_header);

	if (msg_type == GTP_ECHO_RSP)
		hdr->length = htons(len_pkt - len_hdr);
	else
		hdr->length = 0;
}

static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4;
	struct rtable *rt;

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP0_PORT), htons(GTP0_PORT),
			    !net_eq(sock_net(gtp->sk0),
				    dev_net(gtp->dev)),
			    false);

	return 0;
}

static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_packet *gtp_pkt;
	struct gtp0_header *gtp0;
	__be16 seq;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	seq = gtp0->seq;

	/* pull GTP and UDP headers */
	skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp0_packet));

	gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);

	/* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
	 * message shall be copied from the signalling request message
	 * that the GSN is replying to.
	 */
	gtp_pkt->gtp0_h.seq = seq;

	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = gtp->restart_count;

	switch (gtp->sk0->sk_family) {
	case AF_INET:
		if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
			return -1;
		break;
	case AF_INET6:
		return -1;
	}

	return 0;
}

static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct echo_info echo)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (!genlh)
		goto failure;

	if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
		goto failure;

	genlmsg_end(skb, genlh);
	return 0;

failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
{
	struct iphdr *iph = ip_hdr(skb);

	echo->ms.addr.s_addr = iph->daddr;
	echo->peer.addr.s_addr = iph->saddr;
	echo->gtp_version = GTP_V0;
}

static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp0_header *gtp0;
	struct echo_info echo;
	struct sk_buff *msg;
	int ret;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if (!gtp0_validate_echo_hdr(gtp0))
		return -1;

	switch (gtp->sk0->sk_family) {
	case AF_INET:
		gtp0_handle_echo_resp_ip(skb, &echo);
		break;
	case AF_INET6:
		return -1;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}

static int gtp_proto_to_family(__u16 proto)
{
	switch (proto) {
	case ETH_P_IP:
		return AF_INET;
	case ETH_P_IPV6:
		return AF_INET6;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return AF_UNSPEC;
}
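
/* The GTP version is carried in the top three bits of the first header
 * octet, hence the "flags >> 5" version checks in the receive handlers
 * below.
 */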
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;
	__u16 inner_proto;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	/* If the sockets were created in the kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo requests.
	 */
	if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp0_send_echo_resp(gtp, skb);

	if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp0_handle_echo_resp(gtp, skb);

	if (gtp0->type != GTP_TPDU)
		return 1;

	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
		return -1;
	}

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
			     gtp_proto_to_family(inner_proto));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}

/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
{
	int len_pkt, len_hdr;

	/* S flag must be set to 1 */
	hdr->flags = 0x32; /* v1, GTP-non-prime. */
	hdr->type = msg_type;

	/* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
	hdr->tid = 0;

	/* seq, npdu and next should be counted to the length of the GTP packet
	 * that's why the size of gtp1_header should be subtracted,
	 * not the size of gtp1_header_long.
	 */
	len_hdr = sizeof(struct gtp1_header);

	if (msg_type == GTP_ECHO_RSP) {
		len_pkt = sizeof(struct gtp1u_packet);
		hdr->length = htons(len_pkt - len_hdr);
	} else {
		/* GTP_ECHO_REQ does not carry a GTP Information Element,
		 * which is why gtp1_header_long is used here.
		 */
		len_pkt = sizeof(struct gtp1_header_long);
		hdr->length = htons(len_pkt - len_hdr);
	}
}

static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct gtp1u_packet *gtp_pkt;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	/* pull GTP and UDP headers */
	skb_pull_data(skb,
		      sizeof(struct gtp1_header_long) + sizeof(struct udphdr));

	gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
	memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));

	gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);

	/* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
	 * Recovery information element shall not be used, i.e. it shall
	 * be set to zero by the sender and shall be ignored by the receiver.
	 * The Recovery information element is mandatory due to backwards
	 * compatibility reasons.
	 */
	gtp_pkt->ie.tag = GTPIE_RECOVERY;
	gtp_pkt->ie.val = 0;

	iph = ip_hdr(skb);

	/* find route to the sender,
	 * src address becomes dst address and vice versa.
	 */
	rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
	if (IS_ERR(rt)) {
		netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
			   &iph->saddr);
		return -1;
	}

	udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
			    fl4.saddr, fl4.daddr,
			    iph->tos,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    htons(GTP1U_PORT), htons(GTP1U_PORT),
			    !net_eq(sock_net(gtp->sk1u),
				    dev_net(gtp->dev)),
			    false);
	return 0;
}

static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
	struct gtp1_header_long *gtp1u;
	struct echo_info echo;
	struct sk_buff *msg;
	struct iphdr *iph;
	int ret;

	gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));

	/* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
	 * Error Indication and Supported Extension Headers Notification
	 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
	 */
	if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
		return -1;

	iph = ip_hdr(skb);

	echo.ms.addr.s_addr = iph->daddr;
	echo.peer.addr.s_addr = iph->saddr;
	echo.gtp_version = GTP_V1;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
				       msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}

static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
{
	struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
	unsigned int offset = *hdrlen;
	__u8 *next_type, _next_type;

	/* From 29.060: "The Extension Header Length field specifies the length
	 * of the particular Extension header in 4 octets units."
	 *
	 * This length field includes the length field size itself (1 byte),
	 * the payload (variable length) and the next type (1 byte). The
	 * extension header is aligned to 4 bytes.
	 */
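	/* Example: a length field of 1 denotes a 4-octet extension header,
	 * i.e. one length octet, two content octets and one trailing "next
	 * extension header type" octet; hence the loop advances by len * 4
	 * and reads the next type at offset - 1.
	 */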
	do {
		gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
						&_gtp_exthdr);
		if (!gtp_exthdr || !gtp_exthdr->len)
			return -1;

		offset += gtp_exthdr->len * 4;

		/* From 29.060: "If no such Header follows, then the value of
		 * the Next Extension Header Type shall be 0."
		 */
		next_type = skb_header_pointer(skb, offset - 1,
					       sizeof(_next_type), &_next_type);
		if (!next_type)
			return -1;

	} while (*next_type != 0);

	*hdrlen = offset;

	return 0;
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;
	__u16 inner_proto;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	/* If the sockets were created in the kernel, it means that
	 * there is no daemon running in userspace which would
	 * handle echo requests.
	 */
	if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
		return gtp1u_send_echo_resp(gtp, skb);

	if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
		return gtp1u_handle_echo_resp(gtp, skb);

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these bits is set, the optional fields for all of them
	 * are present in the header.
	 */
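	/* When present, the optional field block is always 4 octets: a
	 * 2-octet sequence number, a 1-octet N-PDU number and a 1-octet
	 * next extension header type.
	 */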
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
		netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
		return -1;
	}

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
			     gtp_proto_to_family(inner_proto));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	if (gtp1->flags & GTP1_F_EXTHDR &&
	    gtp_parse_exthdrs(skb, &hdrlen) < 0)
		return -1;

	return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}

static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
		rcu_assign_sk_user_data(sk, NULL);
		release_sock(sk);
		sock_put(sk);
		return;
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	if (gtp->sk_created) {
		udp_tunnel_sock_release(gtp->sk0->sk_socket);
		udp_tunnel_sock_release(gtp->sk1u->sk_socket);
		gtp->sk_created = false;
		gtp->sk0 = NULL;
		gtp->sk1u = NULL;
	} else {
		gtp_encap_disable_sock(gtp->sk0);
		gtp_encap_disable_sock(gtp->sk1u);
	}
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (READ_ONCE(udp_sk(sk)->encap_type)) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
}

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));
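
	/* Per GSM TS 09.60, the GTPv0 flags octet is laid out as version
	 * (3 bits), PT (1 bit), spare '111' (3 bits) and SNN (1 bit);
	 * 0x1e therefore encodes version 0, PT=1 (GTP) and SNN=0.
	 */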
	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}

static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1	1  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

struct gtp_pktinfo {
	struct sock		*sk;
	union {
		struct flowi4	fl4;
		struct flowi6	fl6;
	};
	union {
		struct rtable	*rt;
		struct rt6_info	*rt6;
	};
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__u8			tos;
	__be16			gtph_port;
};

static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, __u8 tos,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->tos	= tos;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
				 struct sock *sk, __u8 tos,
				 struct pdp_ctx *pctx, struct rt6_info *rt6,
				 struct flowi6 *fl6,
				 struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->tos	= tos;
	pktinfo->pctx	= pctx;
	pktinfo->rt6	= rt6;
	pktinfo->fl6	= *fl6;
	pktinfo->dev	= dev;
}

static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
				   struct gtp_pktinfo *pktinfo,
				   struct pdp_ctx *pctx, __u8 tos,
				   __be16 frag_off)
{
	struct rtable *rt;
	struct flowi4 fl4;
	__be16 df;
	int mtu;

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
				  inet_sk(pctx->sk)->inet_saddr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer.addr.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer.addr.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	/* This is similar to tnl_update_pmtu(). */
	df = frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if (frag_off & htons(IP_DF) &&
	    ((!skb_is_gso(skb) && skb->len > mtu) ||
	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
				   struct net_device *dev,
				   struct gtp_pktinfo *pktinfo,
				   struct pdp_ctx *pctx, __u8 tos)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct flowi6 fl6;
	int mtu;

	rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
				  &inet6_sk(pctx->sk)->saddr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI6\n",
			   &pctx->peer.addr6);
		dev->stats.tx_carrier_errors++;
		goto err;
	}
	dst = &rt->dst;

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI6\n",
			   &pctx->peer.addr6);
		dev->stats.collisions++;
		goto err_rt;
	}

	mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
		sizeof(struct ipv6hdr) - sizeof(struct udphdr);
	switch (pctx->gtp_version) {
	case GTP_V0:
		mtu -= sizeof(struct gtp0_header);
		break;
	case GTP_V1:
		mtu -= sizeof(struct gtp1_header);
		break;
	}

	skb_dst_update_pmtu_no_confirm(skb, mtu);

	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		goto err_rt;
	}

	gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	dst_release(dst);
err:
	return -EBADMSG;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct net *net = gtp->net;
	struct pdp_ctx *pctx;
	struct iphdr *iph;
	int ret;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	switch (pctx->sk->sk_family) {
	case AF_INET:
		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
					      iph->tos, iph->frag_off);
		break;
	case AF_INET6:
		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
					      iph->tos);
		break;
	default:
		ret = -1;
		WARN_ON_ONCE(1);
		break;
	}

	if (ret < 0)
		return ret;

	netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
		   &iph->saddr, &iph->daddr);

	return 0;
}

static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct net *net = gtp->net;
	struct pdp_ctx *pctx;
	struct ipv6hdr *ip6h;
	__u8 tos;
	int ret;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
	ip6h = ipv6_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
	else
		pctx = ipv6_pdp_find(gtp, &ip6h->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
			   &ip6h->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	tos = ipv6_get_dsfield(ip6h);

	switch (pctx->sk->sk_family) {
	case AF_INET:
		ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
		break;
	case AF_INET6:
		ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
		break;
	default:
		ret = -1;
		WARN_ON_ONCE(1);
		break;
	}

	if (ret < 0)
		return ret;

	netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
		   &ip6h->saddr, &ip6h->daddr);

	return 0;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	case ETH_P_IPV6:
		err = gtp_build_skb_ip6(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (pktinfo.pctx->sk->sk_family) {
	case AF_INET:
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    !net_eq(sock_net(pktinfo.pctx->sk),
					    dev_net(dev)),
				    false);
		break;
	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
				     &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
				     pktinfo.tos,
				     ip6_dst_hoplimit(&pktinfo.rt6->dst),
				     0,
				     pktinfo.gtph_port, pktinfo.gtph_port,
				     false);
#else
		goto tx_err;
#endif
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
};

static const struct device_type gtp_type = {
	.name = "gtp",
};
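
/* Worst-case tunnel header: the GTPv0 header is larger than either
 * GTPv1 variant, so sizing the MTU and headroom against it is safe for
 * both protocol versions.
 */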
#define GTP_TH_MAXLEN	(sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV4_MAXLEN	(sizeof(struct iphdr) + GTP_TH_MAXLEN)
static void gtp_link_setup(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;
	SET_NETDEV_DEVTYPE(dev, &gtp_type);

	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->lltx = true;
	netif_keep_dst(dev);

	dev->needed_headroom	= LL_MAX_HEADER + GTP_IPV4_MAXLEN;
	gtp->dev = dev;
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
			       const struct nlattr *nla, int family)
{
	udp_conf->family = family;

	switch (udp_conf->family) {
	case AF_INET:
		udp_conf->local_ip.s_addr = nla_get_be32(nla);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		udp_conf->local_ip6 = nla_get_in6_addr(nla);
		break;
#endif
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
				    const struct nlattr *nla, int family)
{
	struct udp_tunnel_sock_cfg tuncfg = {};
	struct udp_port_cfg udp_conf = {};
	struct net *net = gtp->net;
	struct socket *sock;
	int err;

	if (nla) {
		err = gtp_sock_udp_config(&udp_conf, nla, family);
		if (err < 0)
			return ERR_PTR(err);
	} else {
		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
		udp_conf.family = AF_INET;
	}

	if (type == UDP_ENCAP_GTP0)
		udp_conf.local_udp_port = htons(GTP0_PORT);
	else if (type == UDP_ENCAP_GTP1U)
		udp_conf.local_udp_port = htons(GTP1U_PORT);
	else
		return ERR_PTR(-EINVAL);

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err)
		return ERR_PTR(err);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tuncfg);

	return sock->sk;
}

static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
			      int family)
{
	struct sock *sk1u;
	struct sock *sk0;

	sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
	if (IS_ERR(sk0))
		return PTR_ERR(sk0);

	sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
	if (IS_ERR(sk1u)) {
		udp_tunnel_sock_release(sk0->sk_socket);
		return PTR_ERR(sk1u);
	}

	gtp->sk_created = true;
	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;

	return 0;
}

#define GTP_IPV6_MAXLEN	(sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)

static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	unsigned int role = GTP_ROLE_GGSN;
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

#if !IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_GTP_LOCAL6])
		return -EAFNOSUPPORT;
#endif

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN)
			return -EINVAL;
	}
	gtp->role = role;

	if (!data[IFLA_GTP_RESTART_COUNT])
		gtp->restart_count = 0;
	else
		gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);

	gtp->net = src_net;

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	if (data[IFLA_GTP_CREATE_SOCKETS]) {
		if (data[IFLA_GTP_LOCAL6])
			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
		else
			err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
	} else {
		err = gtp_encap_enable(gtp, data);
	}

	if (err < 0)
		goto out_hashtable;

	if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
	    (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
		dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
		dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
	}

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(src_net, gtp_net_id);
	list_add(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct hlist_node *next;
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
	[IFLA_GTP_CREATE_SOCKETS]	= { .type = NLA_U8 },
	[IFLA_GTP_RESTART_COUNT]	= { .type = NLA_U8 },
	[IFLA_GTP_LOCAL]		= { .type = NLA_U32 },
	[IFLA_GTP_LOCAL6]		= { .len = sizeof(struct in6_addr) },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
		nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
		nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}
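
/* Take over a UDP socket that a userspace control-plane daemon created
 * and bound, passed down as a file descriptor via the IFLA_GTP_FD0/FD1
 * link attributes, and mark it for GTP encapsulation.
 */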
static struct sock *gtp_encap_enable_socket(int fd, int type,
                                            struct gtp_dev *gtp)
{
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
        struct socket *sock;
        struct sock *sk;
        int err;

        pr_debug("enable gtp on %d, %d\n", fd, type);

        sock = sockfd_lookup(fd, &err);
        if (!sock) {
                pr_debug("gtp socket fd=%d not found\n", fd);
                return ERR_PTR(err);
        }

        sk = sock->sk;
        if (sk->sk_protocol != IPPROTO_UDP ||
            sk->sk_type != SOCK_DGRAM ||
            (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
                pr_debug("socket fd=%d not UDP\n", fd);
                sk = ERR_PTR(-EINVAL);
                goto out_sock;
        }

        if (sk->sk_family == AF_INET6 &&
            !sk->sk_ipv6only) {
                sk = ERR_PTR(-EADDRNOTAVAIL);
                goto out_sock;
        }

        lock_sock(sk);
        if (sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
                goto out_rel_sock;
        }

        sock_hold(sk);

        tuncfg.sk_user_data = gtp;
        tuncfg.encap_type = type;
        tuncfg.encap_rcv = gtp_encap_recv;
        tuncfg.encap_destroy = gtp_encap_destroy;

        setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
        release_sock(sock->sk);
out_sock:
        sockfd_put(sock);
        return sk;
}
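
/* Enable GTPv0-U and/or GTPv1-U on the sockets handed over from userspace.
 * A negative fd explicitly skips that GTP version; if both sockets are
 * configured they must share an address family.
 */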
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
        struct sock *sk1u = NULL;
        struct sock *sk0 = NULL;

        if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
                return -EINVAL;

        if (data[IFLA_GTP_FD0]) {
                int fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

                if (fd0 >= 0) {
                        sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
                        if (IS_ERR(sk0))
                                return PTR_ERR(sk0);
                }
        }

        if (data[IFLA_GTP_FD1]) {
                int fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

                if (fd1 >= 0) {
                        sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
                        if (IS_ERR(sk1u)) {
                                gtp_encap_disable_sock(sk0);
                                return PTR_ERR(sk1u);
                        }
                }
        }

        gtp->sk0 = sk0;
        gtp->sk1u = sk1u;

        if (sk0 && sk1u &&
            sk0->sk_family != sk1u->sk_family) {
                gtp_encap_disable_sock(sk0);
                gtp_encap_disable_sock(sk1u);
                return -EINVAL;
        }

        return 0;
}
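
/* Resolve GTPA_LINK (optionally in the namespace given by GTPA_NET_NS_FD) to
 * a gtp device.  Returns NULL if the ifindex does not exist or does not
 * belong to this driver.
 */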
static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
        struct gtp_dev *gtp = NULL;
        struct net_device *dev;
        struct net *net;

        /* Examine the link attributes and figure out which network namespace
         * we are talking about.
         */
        if (nla[GTPA_NET_NS_FD])
                net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
        else
                net = get_net(src_net);

        if (IS_ERR(net))
                return NULL;

        /* Check if there's an existing gtpX device to configure */
        dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
        if (dev && dev->netdev_ops == &gtp_netdev_ops)
                gtp = netdev_priv(dev);

        put_net(net);
        return gtp;
}

static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
        pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);

        switch (pctx->gtp_version) {
        case GTP_V0:
                /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
                 * label needs to be the same for uplink and downlink packets,
                 * so let's annotate this.
                 */
                pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
                pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
                break;
        case GTP_V1:
                pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
                pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
                break;
        default:
                break;
        }
}

static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
        if (info->attrs[GTPA_PEER_ADDRESS]) {
                pctx->peer.addr.s_addr =
                        nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
        } else if (info->attrs[GTPA_PEER_ADDR6]) {
                pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
        }
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
        ip_pdp_peer_fill(pctx, info);
        pctx->ms.addr.s_addr =
                nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
        gtp_pdp_fill(pctx, info);
}
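
/* An IPv6 mobile subscriber address must have its low 64 bits zero, i.e. it
 * is treated as a 64-bit prefix, presumably because lookups match on the
 * upper 64 bits only.  Anything else is refused: the callers turn the false
 * return into -EADDRNOTAVAIL.
 */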
static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
        ip_pdp_peer_fill(pctx, info);
        pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
        if (pctx->ms.addr6.s6_addr32[2] ||
            pctx->ms.addr6.s6_addr32[3])
                return false;

        gtp_pdp_fill(pctx, info);

        return true;
}
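
/* Add or update a PDP context.  A context counts as existing if either its
 * MS address or its tunnel id is already known.  NLM_F_EXCL turns that into
 * -EEXIST, NLM_F_REPLACE is not supported, and a request that matches two
 * different existing contexts is refused as well.
 */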
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
                                   struct genl_info *info)
{
        struct pdp_ctx *pctx, *pctx_tid = NULL;
        struct net_device *dev = gtp->dev;
        u32 hash_ms, hash_tid = 0;
        struct in6_addr ms_addr6;
        unsigned int version;
        bool found = false;
        __be32 ms_addr;
        int family;

        version = nla_get_u32(info->attrs[GTPA_VERSION]);

        if (info->attrs[GTPA_FAMILY])
                family = nla_get_u8(info->attrs[GTPA_FAMILY]);
        else
                family = AF_INET;

#if !IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                return ERR_PTR(-EAFNOSUPPORT);
#endif
        if (!info->attrs[GTPA_PEER_ADDRESS] &&
            !info->attrs[GTPA_PEER_ADDR6])
                return ERR_PTR(-EINVAL);

        if ((info->attrs[GTPA_PEER_ADDRESS] &&
             sk->sk_family == AF_INET6) ||
            (info->attrs[GTPA_PEER_ADDR6] &&
             sk->sk_family == AF_INET))
                return ERR_PTR(-EAFNOSUPPORT);

        switch (family) {
        case AF_INET:
                if (!info->attrs[GTPA_MS_ADDRESS] ||
                    info->attrs[GTPA_MS_ADDR6])
                        return ERR_PTR(-EINVAL);

                ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
                hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
                pctx = ipv4_pdp_find(gtp, ms_addr);
                break;
        case AF_INET6:
                if (!info->attrs[GTPA_MS_ADDR6] ||
                    info->attrs[GTPA_MS_ADDRESS])
                        return ERR_PTR(-EINVAL);

                ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
                hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
                pctx = ipv6_pdp_find(gtp, &ms_addr6);
                break;
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }
        if (pctx)
                found = true;
        if (version == GTP_V0)
                pctx_tid = gtp0_pdp_find(gtp,
                                         nla_get_u64(info->attrs[GTPA_TID]),
                                         family);
        else if (version == GTP_V1)
                pctx_tid = gtp1_pdp_find(gtp,
                                         nla_get_u32(info->attrs[GTPA_I_TEI]),
                                         family);
        if (pctx_tid)
                found = true;

        if (found) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
                        return ERR_PTR(-EEXIST);
                if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
                        return ERR_PTR(-EOPNOTSUPP);

                if (pctx && pctx_tid)
                        return ERR_PTR(-EEXIST);
                if (!pctx)
                        pctx = pctx_tid;

                switch (pctx->af) {
                case AF_INET:
                        ipv4_pdp_fill(pctx, info);
                        break;
                case AF_INET6:
                        if (!ipv6_pdp_fill(pctx, info))
                                return ERR_PTR(-EADDRNOTAVAIL);
                        break;
                }

                if (pctx->gtp_version == GTP_V0)
                        netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
                                   pctx->u.v0.tid, pctx);
                else if (pctx->gtp_version == GTP_V1)
                        netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
                                   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

                return pctx;
        }

        pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
        if (pctx == NULL)
                return ERR_PTR(-ENOMEM);

        sock_hold(sk);
        pctx->sk = sk;
        pctx->dev = gtp->dev;
        pctx->af = family;

        switch (pctx->af) {
        case AF_INET:
                if (!info->attrs[GTPA_MS_ADDRESS]) {
                        sock_put(sk);
                        kfree(pctx);
                        return ERR_PTR(-EINVAL);
                }

                ipv4_pdp_fill(pctx, info);
                break;
        case AF_INET6:
                if (!info->attrs[GTPA_MS_ADDR6]) {
                        sock_put(sk);
                        kfree(pctx);
                        return ERR_PTR(-EINVAL);
                }

                if (!ipv6_pdp_fill(pctx, info)) {
                        sock_put(sk);
                        kfree(pctx);
                        return ERR_PTR(-EADDRNOTAVAIL);
                }
                break;
        }

        atomic_set(&pctx->tx_seq, 0);

        switch (pctx->gtp_version) {
        case GTP_V0:
                /* TS 09.60: "The flow label identifies unambiguously a GTP
                 * flow.". We use the tid for this instead, I cannot find a
                 * situation in which this doesn't unambiguously identify the
                 * PDP context.
                 */
                hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
                break;
        case GTP_V1:
                hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
                break;
        }

        hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
        hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

        switch (pctx->gtp_version) {
        case GTP_V0:
                netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
                           pctx->u.v0.tid, &pctx->peer.addr,
                           &pctx->ms.addr, pctx);
                break;
        case GTP_V1:
                netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei,
                           &pctx->peer.addr, &pctx->ms.addr, pctx);
                break;
        }

        return pctx;
}
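
/* Context teardown is RCU-deferred: unlink from both hash chains first, then
 * drop the socket reference and free the context only after a grace period,
 * so lockless readers on the datapath never see a stale pointer.
 */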
static void pdp_context_free(struct rcu_head *head)
{
        struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

        sock_put(pctx->sk);
        kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
        hlist_del_rcu(&pctx->hlist_tid);
        hlist_del_rcu(&pctx->hlist_addr);
        call_rcu(&pctx->rcu_head, pdp_context_free);
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
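
/* GTP_CMD_NEWPDP handler.  Validates the version-specific attributes, picks
 * the matching encap socket (GTPv0-U or GTPv1-U) and creates or updates the
 * context under the rtnl lock.
 */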
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
        unsigned int version;
        struct pdp_ctx *pctx;
        struct gtp_dev *gtp;
        struct sock *sk;
        int err;

        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK])
                return -EINVAL;

        version = nla_get_u32(info->attrs[GTPA_VERSION]);

        switch (version) {
        case GTP_V0:
                if (!info->attrs[GTPA_TID] ||
                    !info->attrs[GTPA_FLOW])
                        return -EINVAL;
                break;
        case GTP_V1:
                if (!info->attrs[GTPA_I_TEI] ||
                    !info->attrs[GTPA_O_TEI])
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        rtnl_lock();

        gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
        if (!gtp) {
                err = -ENODEV;
                goto out_unlock;
        }

        if (version == GTP_V0)
                sk = gtp->sk0;
        else if (version == GTP_V1)
                sk = gtp->sk1u;
        else
                sk = NULL;

        if (!sk) {
                err = -ENODEV;
                goto out_unlock;
        }

        pctx = gtp_pdp_add(gtp, sk, info);
        if (IS_ERR(pctx)) {
                err = PTR_ERR(pctx);
        } else {
                gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
                err = 0;
        }

out_unlock:
        rtnl_unlock();
        return err;
}
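
/* Look up a context on a given link.  An MS address (v4 or v6, which must
 * match GTPA_FAMILY) takes precedence; otherwise fall back to the
 * version-specific tunnel id (GTPA_TID for v0, GTPA_I_TEI for v1).
 */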
static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
                                            struct nlattr *nla[])
{
        struct gtp_dev *gtp;
        int family;

        if (nla[GTPA_FAMILY])
                family = nla_get_u8(nla[GTPA_FAMILY]);
        else
                family = AF_INET;

        gtp = gtp_find_dev(net, nla);
        if (!gtp)
                return ERR_PTR(-ENODEV);

        if (nla[GTPA_MS_ADDRESS]) {
                __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

                if (family != AF_INET)
                        return ERR_PTR(-EINVAL);

                return ipv4_pdp_find(gtp, ip);
        } else if (nla[GTPA_MS_ADDR6]) {
                struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);

                if (family != AF_INET6)
                        return ERR_PTR(-EINVAL);

                if (addr.s6_addr32[2] ||
                    addr.s6_addr32[3])
                        return ERR_PTR(-EADDRNOTAVAIL);

                return ipv6_pdp_find(gtp, &addr);
        } else if (nla[GTPA_VERSION]) {
                u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

                if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
                        return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
                                             family);
                } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
                        return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
                                             family);
                }
        }

        return ERR_PTR(-EINVAL);
}

static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
        struct pdp_ctx *pctx;

        if (nla[GTPA_LINK])
                pctx = gtp_find_pdp_by_link(net, nla);
        else
                pctx = ERR_PTR(-EINVAL);

        if (!pctx)
                pctx = ERR_PTR(-ENOENT);

        return pctx;
}
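
/* GTP_CMD_DELPDP handler.  The lookup and the unlink both run under
 * rcu_read_lock(), so the multicast notification has to use GFP_ATOMIC.
 */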
static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
        struct pdp_ctx *pctx;
        int err = 0;

        if (!info->attrs[GTPA_VERSION])
                return -EINVAL;

        rcu_read_lock();

        pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
        if (IS_ERR(pctx)) {
                err = PTR_ERR(pctx);
                goto out_unlock;
        }

        if (pctx->gtp_version == GTP_V0)
                netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
                           pctx->u.v0.tid, pctx);
        else if (pctx->gtp_version == GTP_V1)
                netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
                           pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

        gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
        pdp_context_delete(pctx);

out_unlock:
        rcu_read_unlock();
        return err;
}
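
/* Serialize one PDP context into a generic netlink message.  Used by the
 * doit/dumpit handlers below as well as by gtp_tunnel_notify().
 */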
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
                              int flags, u32 type, struct pdp_ctx *pctx)
{
        void *genlh;

        genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
                            type);
        if (genlh == NULL)
                goto nlmsg_failure;

        if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
            nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
            nla_put_u8(skb, GTPA_FAMILY, pctx->af))
                goto nla_put_failure;

        switch (pctx->af) {
        case AF_INET:
                if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
                        goto nla_put_failure;
                break;
        }

        switch (pctx->sk->sk_family) {
        case AF_INET:
                if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
                        goto nla_put_failure;
                break;
        }

        switch (pctx->gtp_version) {
        case GTP_V0:
                if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
                    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
                        goto nla_put_failure;
                break;
        case GTP_V1:
                if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
                    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(skb, genlh);
        return 0;

nlmsg_failure:
nla_put_failure:
        genlmsg_cancel(skb, genlh);
        return -EMSGSIZE;
}
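
/* Multicast a NEWPDP/DELPDP notification.  The skb is allocated with the
 * caller-supplied gfp mask (GFP_KERNEL from the new-PDP path, GFP_ATOMIC
 * from the RCU-protected delete path); the multicast send itself always
 * uses GFP_ATOMIC.
 */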
static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
        struct sk_buff *msg;
        int ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
        if (!msg)
                return -ENOMEM;

        ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
        if (ret < 0) {
                nlmsg_free(msg);
                return ret;
        }

        ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
                                      0, GTP_GENL_MCGRP, GFP_ATOMIC);
        return ret;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
        struct pdp_ctx *pctx = NULL;
        struct sk_buff *skb2;
        int err;

        if (!info->attrs[GTPA_VERSION])
                return -EINVAL;

        rcu_read_lock();

        pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
        if (IS_ERR(pctx)) {
                err = PTR_ERR(pctx);
                goto err_unlock;
        }

        skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (skb2 == NULL) {
                err = -ENOMEM;
                goto err_unlock;
        }

        err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
                                 0, info->nlhdr->nlmsg_type, pctx);
        if (err < 0)
                goto err_unlock_free;

        rcu_read_unlock();
        return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
        kfree_skb(skb2);
err_unlock:
        rcu_read_unlock();
        return err;
}
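
/* Dump all contexts of all gtp devices in the namespace.  Resume state lives
 * in cb->args: [0] hash bucket, [1] entries to skip within that bucket,
 * [2] the device being walked, [4] set once the dump is complete.
 */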
static int gtp_genl_dump_pdp(struct sk_buff *skb,
                             struct netlink_callback *cb)
{
        struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
        int i, j, bucket = cb->args[0], skip = cb->args[1];
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
        struct pdp_ctx *pctx;

        if (cb->args[4])
                return 0;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (dev->rtnl_link_ops != &gtp_link_ops)
                        continue;

                gtp = netdev_priv(dev);
                if (last_gtp && last_gtp != gtp)
                        continue;
                else
                        last_gtp = NULL;

                for (i = bucket; i < gtp->hash_size; i++) {
                        j = 0;
                        hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
                                                 hlist_tid) {
                                if (j >= skip &&
                                    gtp_genl_fill_info(skb,
                                                       NETLINK_CB(cb->skb).portid,
                                                       cb->nlh->nlmsg_seq,
                                                       NLM_F_MULTI,
                                                       cb->nlh->nlmsg_type, pctx)) {
                                        cb->args[0] = i;
                                        cb->args[1] = j;
                                        cb->args[2] = (unsigned long)gtp;
                                        goto out;
                                }
                                j++;
                        }
                        skip = 0;
                }
                bucket = 0;
        }
        cb->args[4] = 1;
out:
        rcu_read_unlock();
        return skb->len;
}
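
/* GTP_CMD_ECHOREQ handler: build and send a GTPv0-U/GTPv1-U Echo Request
 * over IPv4.  Only available when the driver created the sockets itself
 * (sk_created), presumably so the Echo Response arrives on a socket the
 * driver owns.
 */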
static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *skb_to_send;
        __be32 src_ip, dst_ip;
        unsigned int version;
        struct gtp_dev *gtp;
        struct flowi4 fl4;
        struct rtable *rt;
        struct sock *sk;
        __be16 port;
        int len;

        if (!info->attrs[GTPA_VERSION] ||
            !info->attrs[GTPA_LINK] ||
            !info->attrs[GTPA_PEER_ADDRESS] ||
            !info->attrs[GTPA_MS_ADDRESS])
                return -EINVAL;

        version = nla_get_u32(info->attrs[GTPA_VERSION]);
        dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
        src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

        gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
        if (!gtp)
                return -ENODEV;

        if (!gtp->sk_created)
                return -EOPNOTSUPP;
        if (!(gtp->dev->flags & IFF_UP))
                return -ENETDOWN;

        if (version == GTP_V0) {
                struct gtp0_header *gtp0_h;

                len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
                        sizeof(struct iphdr) + sizeof(struct udphdr);

                skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
                if (!skb_to_send)
                        return -ENOMEM;

                sk = gtp->sk0;
                port = htons(GTP0_PORT);

                gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
                memset(gtp0_h, 0, sizeof(struct gtp0_header));
                gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
        } else if (version == GTP_V1) {
                struct gtp1_header_long *gtp1u_h;

                len = LL_RESERVED_SPACE(gtp->dev) +
                        sizeof(struct gtp1_header_long) +
                        sizeof(struct iphdr) + sizeof(struct udphdr);

                skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
                if (!skb_to_send)
                        return -ENOMEM;

                sk = gtp->sk1u;
                port = htons(GTP1U_PORT);

                gtp1u_h = skb_push(skb_to_send,
                                   sizeof(struct gtp1_header_long));
                memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
                gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
        } else {
                return -ENODEV;
        }

        rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
        if (IS_ERR(rt)) {
                netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
                           &dst_ip);
                kfree_skb(skb_to_send);
                return -ENODEV;
        }

        udp_tunnel_xmit_skb(rt, sk, skb_to_send,
                            fl4.saddr, fl4.daddr,
                            fl4.flowi4_tos,
                            ip4_dst_hoplimit(&rt->dst),
                            0,
                            port, port,
                            !net_eq(sock_net(sk),
                                    dev_net(gtp->dev)),
                            false);
        return 0;
}

static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
        [GTPA_LINK]             = { .type = NLA_U32, },
        [GTPA_VERSION]          = { .type = NLA_U32, },
        [GTPA_TID]              = { .type = NLA_U64, },
        [GTPA_PEER_ADDRESS]     = { .type = NLA_U32, },
        [GTPA_MS_ADDRESS]       = { .type = NLA_U32, },
        [GTPA_FLOW]             = { .type = NLA_U16, },
        [GTPA_NET_NS_FD]        = { .type = NLA_U32, },
        [GTPA_I_TEI]            = { .type = NLA_U32, },
        [GTPA_O_TEI]            = { .type = NLA_U32, },
        [GTPA_PEER_ADDR6]       = { .len = sizeof(struct in6_addr), },
        [GTPA_MS_ADDR6]         = { .len = sizeof(struct in6_addr), },
        [GTPA_FAMILY]           = { .type = NLA_U8, },
};

static const struct genl_small_ops gtp_genl_ops[] = {
        {
                .cmd = GTP_CMD_NEWPDP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = gtp_genl_new_pdp,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = GTP_CMD_DELPDP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = gtp_genl_del_pdp,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = GTP_CMD_GETPDP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = gtp_genl_get_pdp,
                .dumpit = gtp_genl_dump_pdp,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = GTP_CMD_ECHOREQ,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = gtp_genl_send_echo_req,
                .flags = GENL_ADMIN_PERM,
        },
};

static struct genl_family gtp_genl_family __ro_after_init = {
        .name           = "gtp",
        .version        = 0,
        .hdrsize        = 0,
        .maxattr        = GTPA_MAX,
        .policy         = gtp_genl_policy,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .small_ops      = gtp_genl_ops,
        .n_small_ops    = ARRAY_SIZE(gtp_genl_ops),
        .resv_start_op  = GTP_CMD_ECHOREQ + 1,
        .mcgrps         = gtp_genl_mcgrps,
        .n_mcgrps       = ARRAY_SIZE(gtp_genl_mcgrps),
};
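
/* Per-namespace state is just the list of gtp devices; on namespace teardown
 * every device is queued for unregistration in one rtnl-held batch.
 */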
static int __net_init gtp_net_init(struct net *net)
{
        struct gtp_net *gn = net_generic(net, gtp_net_id);

        INIT_LIST_HEAD(&gn->gtp_dev_list);
        return 0;
}

static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
                                               struct list_head *dev_to_kill)
{
        struct net *net;

        list_for_each_entry(net, net_list, exit_list) {
                struct gtp_net *gn = net_generic(net, gtp_net_id);
                struct gtp_dev *gtp, *gtp_next;

                list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
                        gtp_dellink(gtp->dev, dev_to_kill);
        }
}

static struct pernet_operations gtp_net_ops = {
        .init   = gtp_net_init,
        .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
        .id     = &gtp_net_id,
        .size   = sizeof(struct gtp_net),
};
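
/* Module init: register pernet state first, then the rtnl link type, then
 * the generic netlink family; the error path unwinds in reverse order.
 */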
static int __init gtp_init(void)
{
        int err;

        get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

        err = register_pernet_subsys(&gtp_net_ops);
        if (err < 0)
                goto error_out;

        err = rtnl_link_register(&gtp_link_ops);
        if (err < 0)
                goto unreg_pernet_subsys;

        err = genl_register_family(&gtp_genl_family);
        if (err < 0)
                goto unreg_rtnl_link;

        pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
                sizeof(struct pdp_ctx));

        return 0;

unreg_rtnl_link:
        rtnl_link_unregister(&gtp_link_ops);
unreg_pernet_subsys:
        unregister_pernet_subsys(&gtp_net_ops);
error_out:
        pr_err("error loading GTP module\n");
        return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
        genl_unregister_family(&gtp_genl_family);
        rtnl_link_unregister(&gtp_link_ops);
        unregister_pernet_subsys(&gtp_net_ops);

        pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");