// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/bitfield.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/pfcp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

#define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \
		(TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \
		 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST)

#define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \
		(TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \
		 TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \
		 TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \
		 TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT)
struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
	struct flow_dissector_key_l2tpv3 l2tpv3;
	struct flow_dissector_key_ipsec ipsec;
	struct flow_dissector_key_cfm cfm;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	u8 needs_tc_skb_ext:1;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference
	 * counter can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}
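
/* Find the byte range [start, end) of the key that actually carries
 * non-zero mask bits, rounded out to long boundaries so the masked
 * copy/compare helpers below can operate on whole longs. A mask that
 * only sets, say, basic.n_proto yields a range of a few longs instead
 * of the whole key.
 */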
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}
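
/* mkey = key & mask, computed long-by-long over the mask's active byte
 * range only. The masked key is what gets hashed and compared during
 * lookup.
 */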
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
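
/* A mask fits a template iff it only matches on bits the template also
 * masks: any bit set in the mask but clear in the template's mask shows
 * up in (~*ltmplt & *lmask) and rejects the filter.
 */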
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
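
/* Port ranges cannot be matched by the plain hash lookup, so the skb's
 * port is checked against the filter's [min, max] interval first; on
 * success the filter's own masked min/max values are copied into the
 * lookup key so the subsequent hash lookup can match. For example, a
 * filter created (with a recent enough iproute2) via
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 1000-2000 action drop
 * stores min 1000 and max 2000 in key.tp_range, and a packet to port
 * 1500 passes the interval check below.
 */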
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
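
/* Classify an skb: for each registered mask, in list order, dissect only
 * the fields that mask cares about, mask the result and look it up in
 * that mask's hash table. The first software-visible match wins and its
 * actions are executed.
 */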
TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	if (f->needs_tc_skb_ext)
		tc_skb_ext_tc_disable();
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}
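
/* Push a filter to hardware via the block's offload callbacks. For a
 * skip_sw filter a hardware failure is fatal; otherwise the error is
 * swallowed and the filter stays software-only.
 */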
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);
		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}
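
/* Release a filter reference. On the last put, freeing goes through an
 * RCU work item while the netns can still be held; otherwise (netns
 * teardown) the filter is destroyed immediately.
 */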
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;

	f = idr_find(&head->handle_idr, handle);
	return f ? &f->exts : NULL;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .strict_start_type =
						TCA_FLOWER_L2_MISS },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= NLA_POLICY_MASK(NLA_BE32,
						TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
	[TCA_FLOWER_KEY_FLAGS_MASK]	= NLA_POLICY_MASK(NLA_BE32,
						TCA_FLOWER_KEY_FLAGS_POLICY_MASK),
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_L2TPV3_SID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_SPI]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_SPI_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_L2_MISS]		= NLA_POLICY_MAX(NLA_U8, 1),
	[TCA_FLOWER_KEY_CFM]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_FLAGS]	= NLA_POLICY_MASK(NLA_BE32,
						TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
	[TCA_FLOWER_KEY_ENC_FLAGS_MASK]	= NLA_POLICY_MASK(NLA_BE32,
						TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK),
};
static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]	= {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_PFCP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	= { .type = NLA_U8 },
};

static const struct nla_policy
pfcp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]	= { .type = NLA_U64 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]	= { .type = NLA_U32 },
};

static const struct nla_policy
cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
	[TCA_FLOWER_KEY_CFM_MD_LEVEL]	= NLA_POLICY_MAX(NLA_U8,
						FLOW_DIS_CFM_MDL_MAX),
	[TCA_FLOWER_KEY_CFM_OPCODE]	= { .type = NLA_U8 },
};
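
/* Copy a value/mask pair from netlink attributes. If the mask attribute
 * is absent (or the key has no mask attribute at all), an all-ones mask
 * is assumed, i.e. an exact match on the value.
 */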
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	if (key->basic.ip_proto != IPPROTO_ESP &&
	    key->basic.ip_proto != IPPROTO_AH) {
		NL_SET_ERR_MSG(extack,
			       "Protocol must be either ESP or AH");
		return -EINVAL;
	}

	fl_set_key_val(tb, &key->ipsec.spi,
		       TCA_FLOWER_KEY_SPI,
		       &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
		       sizeof(key->ipsec.spi));
	return 0;
}
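
/* Parse min/max port attributes for range matching. Both ends of a
 * range must be given together, and min must be strictly smaller than
 * max (compared in host byte order).
 */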
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max destination ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
		NL_SET_ERR_MSG(extack,
			       "Both min and max source ports must be specified");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
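
/* Parse one MPLS label stack entry (LSE). The netlink "depth" attribute
 * is 1-based (depth 1 is the outermost label), so it is converted to a
 * 0-based index into the dissector's LSE array.
 */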
static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}
	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}
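
/* Set VLAN id/priority/TPID keys. Any field userspace leaves out keeps a
 * zero mask and therefore matches any value.
 */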
static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}
static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES because ETH_P_PPP_SES
	 * was stored in basic.n_proto, which might get overwritten by
	 * ppp_proto or be set to 0. The role of key_val::type is similar
	 * to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}
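
/* Translate one TCA_FLOWER_KEY_FLAGS bit into its FLOW_DIS_* dissector
 * counterpart. Bits not covered by the user-supplied mask are left
 * unset in both key and mask, i.e. wildcarded.
 */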
  1015. static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
  1016. u32 *dissector_key, u32 *dissector_mask,
  1017. u32 flower_flag_bit, u32 dissector_flag_bit)
  1018. {
  1019. if (flower_mask & flower_flag_bit) {
  1020. *dissector_mask |= dissector_flag_bit;
  1021. if (flower_key & flower_flag_bit)
  1022. *dissector_key |= dissector_flag_bit;
  1023. }
  1024. }
static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb,
			    bool encap, u32 *flags_key, u32 *flags_mask,
			    struct netlink_ext_ack *extack)
{
	int fl_key, fl_mask;
	u32 key, mask;

	if (encap) {
		fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
		fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
	} else {
		fl_key = TCA_FLOWER_KEY_FLAGS;
		fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
	}

	/* mask is mandatory for flags */
	if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[fl_key]));
	mask = be32_to_cpu(nla_get_be32(tb[fl_mask]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
			FLOW_DIS_F_TUNNEL_CSUM);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
			FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
			FLOW_DIS_F_TUNNEL_CRIT_OPT);

	return 0;
}

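/* For illustration only: with iproute2, the non-encap flags correspond
 * to the flower "ip_flags" keyword, e.g.
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_flags frag action drop
 *
 * The frontend always sends a mask alongside the flags, matching the
 * "mask is mandatory" check above.
 */
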
static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
		return -ERANGE;

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

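/* For illustration only: assuming iproute2's flower syntax, a GENEVE
 * option match looks like
 *
 *	tc filter add dev geneve0 ingress flower \
 *		geneve_opts 0102:80:00880022/ffff:ff:ffffffff \
 *		enc_key_id 100 action drop
 *
 * i.e. CLASS:TYPE:DATA with an optional mask after the slash, which is
 * exactly the CLASS/TYPE/DATA split parsed above.
 */
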
static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

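/* For illustration only: the VXLAN tunnel option carries just the GBP
 * field, expressed in iproute2 as e.g. "vxlan_opts 01" with an optional
 * "/MASK" suffix.
 */
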
static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		memset(&md->u.index, 0xff, sizeof(md->u.index));
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		md->u.md2.dir = 1;
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		set_hwid(&md->u.md2, 0xff);
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

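/* For illustration only: iproute2 encodes ERSPAN options as
 * VER:INDEX:DIR:HWID, e.g. "erspan_opts 1:6789:0:0" for a version 1
 * session-index match, mirroring the version split handled above
 * (index for v1, dir and hwid for v2).
 */
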
static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

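/* For illustration only: iproute2 encodes GTP options as PDU_TYPE:QFI,
 * e.g. "gtp_opts 1:8/ff:ff", which lands here as the PDU_TYPE and QFI
 * attributes checked above.
 */
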
static int fl_set_pfcp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			   int depth, int option_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX + 1];
	struct pfcp_metadata *md;
	int err;

	md = (struct pfcp_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_PFCP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-pfcp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_PFCP_MAX, nla,
			       pfcp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel key pfcp option type");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE])
		md->type = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID])
		md->seid = nla_get_be64(tb[TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID]);

	return sizeof(*md);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type !=
			    IP_TUNNEL_GENEVE_OPT_BIT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_GENEVE_OPT_BIT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_VXLAN_OPT_BIT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_ERSPAN_OPT_BIT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_GTP_OPT_BIT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_PFCP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack, "Duplicate type for pfcp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
			option_len = fl_set_pfcp_opt(nla_opt_key, key,
						     key_depth, option_len,
						     extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = IP_TUNNEL_PFCP_OPT_BIT;
			option_len = fl_set_pfcp_opt(nla_opt_msk, mask,
						     msk_depth, option_len,
						     extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}

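/* For illustration only: these checks reject contradictory conntrack
 * matches. In iproute2 terms, "ct_state +trk+est" is accepted, while
 * "ct_state +new+est" (new and est together) or "ct_state +est" (a flag
 * without +trk) fails with one of the extack messages above.
 */
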
static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
			struct fl_flow_key *key, struct fl_flow_key *mask,
			int vthresh)
{
	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;

	if (!tb) {
		*ethertype = 0;
		return good_num_of_vlans;
	}

	*ethertype = nla_get_be16(tb);
	if (good_num_of_vlans || eth_type_vlan(*ethertype))
		return true;

	key->basic.n_proto = *ethertype;
	mask->basic.n_proto = cpu_to_be16(~0);
	return false;
}

static void fl_set_key_cfm_md_level(struct nlattr **tb,
				    struct fl_flow_key *key,
				    struct fl_flow_key *mask,
				    struct netlink_ext_ack *extack)
{
	u8 level;

	if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
		return;

	level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
	key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
	mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
}

static void fl_set_key_cfm_opcode(struct nlattr **tb,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mask,
				  struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
		       &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
		       sizeof(key->cfm.opcode));
}

static int fl_set_key_cfm(struct nlattr **tb,
			  struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
	int err;

	if (!tb[TCA_FLOWER_KEY_CFM])
		return 0;

	err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
			       tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
	if (err < 0)
		return err;

	fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
	fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);

	return 0;
}

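/* For illustration only: assuming iproute2's flower frontend, the CFM
 * key is expressed as e.g. "cfm mdl 5 op 1", filling in the maintenance
 * domain level and opcode parsed by the helpers above.
 */
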
static int fl_set_key(struct net *net, struct nlattr *tca_opts,
		      struct nlattr **tb, struct fl_flow_key *key,
		      struct fl_flow_key *mask, struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
		       &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
		       sizeof(key->meta.l2_miss));

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->num_of_vlans,
		       TCA_FLOWER_KEY_NUM_OF_VLANS,
		       &mask->num_of_vlans,
		       TCA_FLOWER_UNSPEC,
		       sizeof(key->num_of_vlans));

	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
				TCA_FLOWER_KEY_VLAN_PRIO,
				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
				&key->vlan, &mask->vlan);

		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
				&ethertype, key, mask, 1)) {
			fl_set_key_vlan(tb, ethertype,
					TCA_FLOWER_KEY_CVLAN_ID,
					TCA_FLOWER_KEY_CVLAN_PRIO,
					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					&key->cvlan, &mask->cvlan);
			fl_set_key_val(tb, &key->basic.n_proto,
				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
				       &mask->basic.n_proto,
				       TCA_FLOWER_UNSPEC,
				       sizeof(key->basic.n_proto));
		}
	}

	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	} else if (key->basic.ip_proto == IPPROTO_L2TP) {
		fl_set_key_val(tb, &key->l2tpv3.session_id,
			       TCA_FLOWER_KEY_L2TPV3_SID,
			       &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
			       sizeof(key->l2tpv3.session_id));
	} else if (key->basic.n_proto == htons(ETH_P_CFM)) {
		ret = fl_set_key_cfm(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_SPI]) {
		ret = fl_set_key_spi(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
		       sizeof(key->hash.hash));

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS]) {
		ret = fl_set_key_flags(tca_opts, tb, false,
				       &key->control.flags,
				       &mask->control.flags, extack);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_FLAGS])
		ret = fl_set_key_flags(tca_opts, tb, true,
				       &key->enc_control.flags,
				       &mask->enc_control.flags, extack);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

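/* In other words: filters sharing a mask are hashed only over the byte
 * range of the masked key that the mask actually covers. key_offset is
 * advanced to the first used byte (mask->range.start) and key_len is
 * trimmed to the used span, so bytes outside the mask never influence
 * the hash.
 */
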
#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)					\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
		   0, FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_SET(keys, cnt, id, member)				\
	do {								\
		keys[cnt].key_id = id;					\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);	\
		cnt++;							\
	} while (0)

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
	do {								\
		if (FL_KEY_IS_MASKED(mask, member))			\
			FL_KEY_SET(keys, cnt, id, member);		\
	} while (0)

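/* For illustration, a call such as
 *
 *	FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_IP, ip);
 *
 * expands (roughly) to:
 *
 *	if (memchr_inv(((char *)mask) + offsetof(struct fl_flow_key, ip),
 *		       0, sizeof_field(struct fl_flow_key, ip))) {
 *		keys[cnt].key_id = FLOW_DISSECTOR_KEY_IP;
 *		keys[cnt].offset = offsetof(struct fl_flow_key, ip);
 *		cnt++;
 *	}
 *
 * i.e. a dissector key is registered only when at least one byte of the
 * corresponding mask member is non-zero.
 */
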
static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6) ||
	    FL_KEY_IS_MASKED(mask, enc_control))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CT, ct);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_HASH, hash);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPSEC, ipsec);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CFM, cfm);

	skb_flow_dissector_init(dissector, keys, cnt);
}

static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_range.tp_min.dst &&
	     newmask->key.tp_range.tp_max.dst) ||
	    (newmask->key.tp_range.tp_min.src &&
	     newmask->key.tp_range.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}

static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
{
	return mask->meta.l2_miss;
}

static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}

static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, u32 flags,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
	struct nlattr *tca_opts = tca[TCA_OPTIONS];
	struct cls_fl_filter *fold = *arg;
	bool bound_to_filter = false;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca_opts) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca_opts, fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			kfree(fnew);
			err = -EINVAL;
			goto errout_tb;
		}
	}

	if (!fold) {
		spin_lock(&tp->lock);
		if (!handle) {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
					    INT_MAX, GFP_ATOMIC);
		} else {
			err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		}
		spin_unlock(&tp->lock);

		if (err) {
			kfree(fnew);
			goto errout_tb;
		}
	}
	fnew->handle = handle;

	err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
			       !tc_skip_hw(fnew->flags));
	if (err < 0)
		goto errout_idr;

	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
				   &fnew->exts, flags, fnew->flags,
				   extack);
	if (err < 0)
		goto errout_idr;

	if (tb[TCA_FLOWER_CLASSID]) {
		fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_lock();
		tcf_bind_filter(tp, &fnew->res, base);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_unlock();
		bound_to_filter = true;
	}

	err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack);
	if (err)
		goto unbind_filter;

	fl_mask_update_range(mask);
	fl_set_masked_key(&fnew->mkey, &fnew->key, mask);

	if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		err = -EINVAL;
		goto unbind_filter;
	}

	/* Enable tc skb extension if filter matches on data extracted from
	 * this extension.
	 */
	if (fl_needs_tc_skb_ext(&mask->key)) {
		fnew->needs_tc_skb_ext = 1;
		tc_skb_ext_tc_enable();
	}

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto unbind_filter;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	tcf_proto_update_usesw(tp, fnew->flags);

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		idr_replace(&head->handle_idr, fnew, fnew->handle);

		refcount_inc(&fnew->refcnt);
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);

unbind_filter:
	if (bound_to_filter) {
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_lock();
		tcf_unbind_filter(tp, &fnew->res);
		if (flags & TCA_ACT_FLAGS_NO_RTNL)
			rtnl_unlock();
	}

errout_idr:
	if (!fold) {
		spin_lock(&tp->lock);
		idr_remove(&head->handle_idr, fnew->handle);
		spin_unlock(&tp->lock);
	}
	__fl_put(fnew);

errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}

static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	unsigned long id = arg->cookie, tmp;
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	rcu_read_lock();
	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
		/* don't return filters that are being deleted */
		if (!f || !refcount_inc_not_zero(&f->refcnt))
			continue;
		rcu_read_unlock();

		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			rcu_read_lock();
			break;
		}
		__fl_put(f);
		arg->count++;
		rcu_read_lock();
	}
	rcu_read_unlock();
	arg->cookie = id;
}

static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}

static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
					      cls_flower.common.extack);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = tc_setup_cb_reoffload(block, tp, add, cb,
					    TC_SETUP_CLSFLOWER, &cls_flower,
					    cb_priv, &f->flags,
					    &f->in_hw_count);
		tc_cleanup_offload_action(&cls_flower.rule->action);
		kfree(cls_flower.rule);

		if (err) {
			__fl_put(f);
			return err;
		}
next_flow:
		__fl_put(f);
	}

	return 0;
}

static void fl_hw_add(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
}

static void fl_hw_del(struct tcf_proto *tp, void *type_data)
{
	struct flow_cls_offload *cls_flower = type_data;
	struct cls_fl_filter *f =
		(struct cls_fl_filter *) cls_flower->cookie;

	spin_lock(&tp->lock);
	if (!list_empty(&f->hw_list))
		list_del_init(&f->hw_list);
	spin_unlock(&tp->lock);
}

static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
	kfree(cls_flower.rule);

	return 0;
}

static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
}

static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tca_opts = tca[TCA_OPTIONS];
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca_opts)
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca_opts, fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key,
			 &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}

static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
			       flow_setup_cb_t *cb, void *cb_priv)
{
	struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
	struct flow_cls_offload cls_flower = {};

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
				   FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;
	cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);

	kfree(cls_flower.rule);
}

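/* For illustration only: a chain template is created from user space
 * with something like (iproute2 syntax; masks only, no actions)
 *
 *	tc chain add dev eth0 ingress protocol ip flower \
 *		dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:ff
 *
 * after which every filter added to that chain must fit the template's
 * mask, as enforced by fl_mask_fits_tmplt() in fl_change().
 */
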
static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
			    TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
			    TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
			    TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
			    TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_range.tp_max.src)))
		return -1;

	return 0;
}

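/* For illustration only: port ranges are expressed in iproute2 as e.g.
 * "dst_port 100-200", and are dumped back through the PORT_DST_MIN/MAX
 * and PORT_SRC_MIN/MAX attributes above.
 */
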
static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
				    struct flow_dissector_key_mpls *mpls_key,
				    struct flow_dissector_key_mpls *mpls_mask,
				    u8 lse_index)
{
	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
	int err;

	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
			 lse_index + 1);
	if (err)
		return err;

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}

	return 0;
}

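/* Dump the nested MPLS representation: one TCA_FLOWER_KEY_MPLS_OPTS_LSE
 * nest per bit set in used_lses. Partially written nests are cancelled on
 * error so the message stays well formed.
 */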
static int fl_dump_key_mpls_opts(struct sk_buff *skb,
				 struct flow_dissector_key_mpls *mpls_key,
				 struct flow_dissector_key_mpls *mpls_mask)
{
	struct nlattr *opts;
	struct nlattr *lse;
	u8 lse_index;
	int err;

	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
	if (!opts)
		return -EMSGSIZE;

	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
		if (!(mpls_mask->used_lses & 1 << lse_index))
			continue;
		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
		if (!lse) {
			err = -EMSGSIZE;
			goto err_opts;
		}
		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
					       lse_index);
		if (err)
			goto err_opts_lse;
		nla_nest_end(skb, lse);
	}
	nla_nest_end(skb, opts);

	return 0;

err_opts_lse:
	nla_nest_cancel(skb, lse);
err_opts:
	nla_nest_cancel(skb, opts);
	return err;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_key;
	int err;

	if (!mpls_mask->used_lses)
		return 0;

	lse_mask = &mpls_mask->ls[0];
	lse_key = &mpls_key->ls[0];

	/* For backward compatibility, don't use the MPLS nested attributes if
	 * the rule can be expressed using the old attributes.
	 */
	if (mpls_mask->used_lses & ~1 ||
	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);

	if (lse_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 lse_key->mpls_ttl);
		if (err)
			return err;
	}
	if (lse_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 lse_key->mpls_tc);
		if (err)
			return err;
	}
	if (lse_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  lse_key->mpls_label);
		if (err)
			return err;
	}
	if (lse_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 lse_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

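/* Translate the dissector's FLOW_DIS_* control-flag bits into the
 * TCA_FLOWER_KEY_FLAGS_* UAPI bits (fl_get_key_flag copies a key bit only
 * where the corresponding mask bit is set), then emit key and mask as
 * big-endian 32-bit attributes.
 */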
static int fl_dump_key_flags(struct sk_buff *skb, bool encap,
			     u32 flags_key, u32 flags_mask)
{
	int fl_key, fl_mask;
	__be32 _key, _mask;
	u32 key, mask;
	int err;

	if (encap) {
		fl_key = TCA_FLOWER_KEY_ENC_FLAGS;
		fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK;
	} else {
		fl_key = TCA_FLOWER_KEY_FLAGS;
		fl_mask = TCA_FLOWER_KEY_FLAGS_MASK;
	}

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
			FLOW_DIS_F_TUNNEL_CSUM);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
			FLOW_DIS_F_TUNNEL_DONT_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
			FLOW_DIS_F_TUNNEL_CRIT_OPT);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, fl_key, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, fl_mask, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
				 struct flow_dissector_key_enc_opts *enc_opts)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct vxlan_metadata *)&enc_opts->data[0];
	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_erspan_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
	if (!nest)
		goto nla_put_failure;

	md = (struct erspan_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
		goto nla_put_failure;

	if (md->version == 1 &&
	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
		goto nla_put_failure;

	if (md->version == 2 &&
	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
			md->u.md2.dir) ||
	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_gtp_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct gtp_pdu_session_info *session_info;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
	if (!nest)
		goto nla_put_failure;

	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];

	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
		       session_info->pdu_type))
		goto nla_put_failure;

	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_pfcp_opt(struct sk_buff *skb,
				struct flow_dissector_key_enc_opts *enc_opts)
{
	struct pfcp_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_PFCP);
	if (!nest)
		goto nla_put_failure;

	md = (struct pfcp_metadata *)&enc_opts->data[0];
	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_TYPE, md->type))
		goto nla_put_failure;

	if (nla_put_be64(skb, TCA_FLOWER_KEY_ENC_OPT_PFCP_SEID,
			 md->seid, 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

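/* Conntrack fields are dumped only when the matching option is enabled in
 * the kernel configuration; IS_ENABLED() lets the compiler drop the dead
 * branches entirely on kernels built without conntrack support.
 */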
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int fl_dump_key_cfm(struct sk_buff *skb,
			   struct flow_dissector_key_cfm *key,
			   struct flow_dissector_key_cfm *mask)
{
	struct nlattr *opts;
	int err;
	u8 mdl;

	if (!memchr_inv(mask, 0, sizeof(*mask)))
		return 0;

	opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
	if (!opts)
		return -EMSGSIZE;

	if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
		mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
		if (err)
			goto err_cfm_opts;
	}

	if (mask->opcode) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
		if (err)
			goto err_cfm_opts;
	}

	nla_nest_end(skb, opts);

	return 0;

err_cfm_opts:
	nla_nest_cancel(skb, opts);
	return err;
}

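/* Dump one set of tunnel options (key or mask) under @enc_opt_type,
 * dispatching on the option style recorded in dst_opt_type. An option type
 * without a dump handler fails the whole dump rather than emitting a
 * truncated nest.
 */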
static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case IP_TUNNEL_GENEVE_OPT_BIT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case IP_TUNNEL_VXLAN_OPT_BIT:
		err = fl_dump_key_vxlan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case IP_TUNNEL_ERSPAN_OPT_BIT:
		err = fl_dump_key_erspan_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case IP_TUNNEL_GTP_OPT_BIT:
		err = fl_dump_key_gtp_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	case IP_TUNNEL_PFCP_OPT_BIT:
		err = fl_dump_key_pfcp_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

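/* Dump every masked field of @key back to userspace. As a rough example
 * (illustrative command line, not taken from this file), a filter added
 * with
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp dst_port 80
 *
 * would come back as TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO and
 * the TCP port attributes below, while every field whose mask is all-zero
 * is omitted entirely.
 */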
static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, &key->meta.l2_miss,
			    TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
			    TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (mask->num_of_vlans.num_of_vlans) {
		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
			goto nla_put_failure;
	}

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_eth_type) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_eth_type) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->vlan.vlan_eth_type))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (mask->pppoe.session_id) {
		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
				 key->pppoe.session_id))
			goto nla_put_failure;
	}
	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
				 key->pppoe.ppp_proto))
			goto nla_put_failure;
	}

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_L2TP &&
		 fl_dump_key_val(skb, &key->l2tpv3.session_id,
				 TCA_FLOWER_KEY_L2TPV3_SID,
				 &mask->l2tpv3.session_id,
				 TCA_FLOWER_UNSPEC,
				 sizeof(key->l2tpv3.session_id)))
		goto nla_put_failure;

	if (key->ipsec.spi &&
	    fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI,
			    &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
			    sizeof(key->ipsec.spi)))
		goto nla_put_failure;

	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	    fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, false, key->control.flags,
			      mask->control.flags))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
			    &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
			    sizeof(key->hash.hash)))
		goto nla_put_failure;

	if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, true, key->enc_control.flags,
			      mask->enc_control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

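/* Full dump of one filter. Fields that can change underneath us (classid,
 * flags, key/mask) are read under tp->lock; hardware stats are refreshed
 * via fl_hw_update_stats() before the stats are dumped, unless the filter
 * was installed with skip_hw.
 */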
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

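/* Terse variant of fl_dump(): omits the key/mask entirely and emits only
 * the handle, the flags word and a terse action dump. Hardware counters
 * are still refreshed via fl_hw_update_stats() beforehand.
 */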
static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	skip_hw = tc_skip_hw(f->flags);

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (tcf_exts_terse_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

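/* Report whether this instance holds no filters. The result is latched in
 * tp->deleting under tp->lock, so inserts racing with the teardown of an
 * empty instance can notice the dying proto and retry.
 */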
static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}

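/* With TCF_PROTO_OPS_DOIT_UNLOCKED the core invokes these ops without
 * holding rtnl; the implementation relies on tp->lock, RCU and reference
 * counting instead (the rtnl_held argument only tells the offload paths
 * whether rtnl happens to be held).
 */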
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.terse_dump	= fl_terse_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_reoffload = fl_tmplt_reoffload,
	.tmplt_dump	= fl_tmplt_dump,
	.get_exts	= fl_get_exts,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

MODULE_ALIAS_NET_CLS("flower");

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");