rtnetlink.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetic was wrong.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/devlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <linux/dpll.h>

#include "dev.h"

#define RTNL_MAX_TYPE		50
#define RTNL_SLAVE_MAX_TYPE	44

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	struct module		*owner;
	unsigned int		flags;
	struct rcu_head		rcu;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

int rtnl_lock_killable(void)
{
	return mutex_lock_killable(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock_killable);

static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);

void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	/* Ensure that we didn't actually add any TODO item when __rtnl_unlock()
	 * is used. In some places, e.g. in cfg80211, we have code that will do
	 * something like
	 *   rtnl_lock()
	 *   wiphy_lock()
	 *   ...
	 *   rtnl_unlock()
	 *
	 * and because netdev_run_todo() acquires the RTNL for items on the list
	 * we could cause a situation such as this:
	 * Thread 1			Thread 2
	 *				  rtnl_lock()
	 *				  unregister_netdevice()
	 *				  __rtnl_unlock()
	 * rtnl_lock()
	 * wiphy_lock()
	 * rtnl_unlock()
	 *   netdev_run_todo()
	 *     __rtnl_unlock()
	 *
	 *     // list not empty now
	 *     // because of thread 2
	 *				  rtnl_lock()
	 *     while (!list_empty(...))
	 *       rtnl_lock()
	 *       wiphy_lock()
	 * **** DEADLOCK ****
	 *
	 * However, usage of __rtnl_unlock() is rare, and so we can ensure that
	 * it's not used in cases where something is added to do the list.
	 */
	WARN_ON(!list_empty(&net_todo_list));

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

bool refcount_dec_and_rtnl_lock(refcount_t *r)
{
	return refcount_dec_and_mutex_lock(r, &rtnl_mutex);
}
EXPORT_SYMBOL(refcount_dec_and_rtnl_lock);
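
/*
 * Editorial usage sketch (not part of this file): the typical pattern for
 * code that changes RTNL-protected state from process context. The helper
 * name and its trivial body are illustrative assumptions only; the point is
 * the lock/unlock pairing, and that rtnl_unlock() also runs netdev_run_todo().
 */
#if 0
static int demo_change_something(struct net_device *dev)
{
	int err;

	rtnl_lock();
	/* RTNL held: netdev state may be inspected and modified here. */
	err = dev ? 0 : -ENODEV;
	rtnl_unlock();		/* unlocks via netdev_run_todo() */

	return err;
}
#endif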

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}

static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;

	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
		protocol = PF_UNSPEC;

	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
	if (!tab)
		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);

	return rcu_dereference_rtnl(tab[msgtype]);
}

static int rtnl_register_internal(struct module *owner,
				  int protocol, int msgtype,
				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
				  unsigned int flags)
{
	struct rtnl_link *link, *old;
	struct rtnl_link __rcu **tab;
	int msgindex;
	int ret = -ENOBUFS;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
		if (!tab)
			goto unlock;

		/* ensures we see the 0 stores */
		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
	}

	old = rtnl_dereference(tab[msgindex]);
	if (old) {
		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
		if (!link)
			goto unlock;
	} else {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			goto unlock;
	}

	WARN_ON(link->owner && link->owner != owner);
	link->owner = owner;

	WARN_ON(doit && link->doit && link->doit != doit);
	if (doit)
		link->doit = doit;
	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
	if (dumpit)
		link->dumpit = dumpit;

	WARN_ON(rtnl_msgtype_kind(msgtype) != RTNL_KIND_DEL &&
		(flags & RTNL_FLAG_BULK_DEL_SUPPORTED));
	link->flags |= flags;

	/* publish protocol:msgtype */
	rcu_assign_pointer(tab[msgindex], link);

	ret = 0;
	if (old)
		kfree_rcu(old, rcu);
unlock:
	rtnl_unlock();
	return ret;
}

/**
 * rtnl_register_module - Register a rtnetlink message type
 *
 * @owner: module registering the hook (THIS_MODULE)
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Like rtnl_register, but for use by removable modules.
 */
int rtnl_register_module(struct module *owner,
			 int protocol, int msgtype,
			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
			 unsigned int flags)
{
	return rtnl_register_internal(owner, protocol, msgtype,
				      doit, dumpit, flags);
}
EXPORT_SYMBOL_GPL(rtnl_register_module);
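
/*
 * Editorial usage sketch (not part of this file): how a loadable module might
 * hook a message type with rtnl_register_module() and tear it down again.
 * The "demo" names and the RTM_GETLINK/PF_UNSPEC choice are assumptions made
 * purely for illustration, not an existing in-tree user.
 */
#if 0
static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	/* Validate and act on the request; 0 on success, -errno on error. */
	return 0;
}

static int __init demo_init(void)
{
	return rtnl_register_module(THIS_MODULE, PF_UNSPEC, RTM_GETLINK,
				    demo_doit, NULL, 0);
}

static void __exit demo_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETLINK);
}
#endif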

/**
 * rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   unsigned int flags)
{
	int err;

	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
				     flags);
	if (err)
		pr_err("Unable to register rtnetlink message handler, "
		       "protocol = %d, message type = %d\n", protocol, msgtype);
}

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	rtnl_lock();
	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
	if (!tab) {
		rtnl_unlock();
		return -ENOENT;
	}

	link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
	rtnl_unlock();

	kfree_rcu(link, rcu);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	struct rtnl_link __rcu **tab;
	struct rtnl_link *link;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	rtnl_lock();
	tab = rcu_replace_pointer_rtnl(rtnl_msg_handlers[protocol], NULL);
	if (!tab) {
		rtnl_unlock();
		return;
	}
	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
		link = rcu_replace_pointer_rtnl(tab[msgindex], NULL);
		kfree_rcu(link, rcu);
	}
	rtnl_unlock();

	synchronize_net();

	kfree(tab);
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);

int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n)
{
	const struct rtnl_msg_handler *handler;
	int i, err;

	for (i = 0, handler = handlers; i < n; i++, handler++) {
		err = rtnl_register_internal(handler->owner, handler->protocol,
					     handler->msgtype, handler->doit,
					     handler->dumpit, handler->flags);
		if (err) {
			__rtnl_unregister_many(handlers, i);
			break;
		}
	}
	return err;
}
EXPORT_SYMBOL_GPL(__rtnl_register_many);

void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n)
{
	const struct rtnl_msg_handler *handler;
	int i;

	for (i = n - 1, handler = handlers + n - 1; i >= 0; i--, handler--)
		rtnl_unregister(handler->protocol, handler->msgtype);
}
EXPORT_SYMBOL_GPL(__rtnl_unregister_many);
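
/*
 * Editorial usage sketch (not part of this file): batch registration with a
 * static rtnl_msg_handler table, as a subsystem might do at init time. The
 * handler function and the message-type choices below are placeholders for
 * illustration; on a mid-loop failure the already-registered entries are
 * rolled back by __rtnl_unregister_many().
 */
#if 0
static int demo_link_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	return 0;	/* placeholder behaviour */
}

static const struct rtnl_msg_handler demo_handlers[] = {
	{.owner = THIS_MODULE, .msgtype = RTM_NEWLINK, .doit = demo_link_doit},
	{.owner = THIS_MODULE, .msgtype = RTM_DELLINK, .doit = demo_link_doit},
};

static int __init demo_handlers_init(void)
{
	return __rtnl_register_many(demo_handlers, ARRAY_SIZE(demo_handlers));
}
#endif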

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}

	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for alloc/setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if ((ops->alloc || ops->setup) && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	/* Sanity-check max sizes to avoid stack buffer overflow. */
	if (WARN_ON(ops->maxtype > RTNL_MAX_TYPE ||
		    ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE))
		return -EINVAL;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
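
/*
 * Editorial usage sketch (not part of this file): the minimal rtnl_link_ops
 * a virtual device driver might register so that "ip link add ... type demo"
 * can create its devices. The "demo" kind, setup routine and module hooks
 * are illustrative assumptions only.
 */
#if 0
static void demo_setup(struct net_device *dev)
{
	/* Initialise link-layer defaults for the newly allocated netdev. */
	ether_setup(dev);
}

static struct rtnl_link_ops demo_link_ops __read_mostly = {
	.kind	= "demo",
	.setup	= demo_setup,
	/* .dellink is filled in by __rtnl_link_register() when left unset. */
};

static int __init demo_link_init(void)
{
	return rtnl_link_register(&demo_link_ops);
}

static void __exit demo_link_exit(void)
{
	rtnl_link_unregister(&demo_link_ops);
}
#endif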

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex and guarantee net_namespace_list
 * integrity (hold pernet_ops_rwsem for writing to close the race
 * with setup_net() and cleanup_net()).
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		rtnl_lock();
		/* We held write locked pernet_ops_rwsem, and parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		if (!atomic_read(&dev_unreg_count))
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	size_t size = 0;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (!master_dev)
		goto out;

	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		goto out;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	size = nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);

out:
	rcu_read_unlock();
	return size;
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	ASSERT_RTNL();

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail_rcu(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_del_rcu(&ops->list);
	rtnl_unlock();

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
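
/*
 * Editorial usage sketch (not part of this file): how an address family might
 * hook per-family IFLA_AF_SPEC data via rtnl_af_ops. The callback bodies, the
 * placeholder attribute type and the AF_PACKET family choice are assumptions
 * made only to illustrate the shape of a registration.
 */
#if 0
static size_t demo_get_link_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	return nla_total_size(4);	/* one hypothetical u32 attribute */
}

static int demo_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev,
			     u32 ext_filter_mask)
{
	return nla_put_u32(skb, 1, 0);	/* placeholder attribute type/value */
}

static struct rtnl_af_ops demo_af_ops __read_mostly = {
	.family			= AF_PACKET,
	.get_link_af_size	= demo_get_link_af_size,
	.fill_link_af		= demo_fill_link_af,
};

/* rtnl_af_register(&demo_af_ops) would then be called at init time. */
#endif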

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	rcu_read_lock();
	list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}
	rcu_read_unlock();

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;
	bool ret = false;

	rcu_read_lock();

	master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		ret = true;
	rcu_read_unlock();
	return ret;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;

	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;

	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;

	if (ops->fill_slave_info) {
		slave_data = nla_nest_start_noflag(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;

	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;

	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start_noflag(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start_noflag(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL);
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 const struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;

	nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	/* nothing is dumped for dst_default_metrics, so just skip the loop */
	if (metrics == dst_default_metrics.metrics)
		return 0;

	mx = nla_nest_start_noflag(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_error = error,
		.rta_id = id,
	};

	if (dst) {
		ci.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse);
		ci.rta_used = dst->__use;
		ci.rta_clntref = rcuref_read(&dst->__rcuref);
	}
	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

void netdev_set_operstate(struct net_device *dev, int newstate)
{
	unsigned int old = READ_ONCE(dev->operstate);

	do {
		if (old == newstate)
			return;
	} while (!try_cmpxchg(&dev->operstate, &old, newstate));

	netdev_state_change(dev);
}
EXPORT_SYMBOL(netdev_set_operstate);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = READ_ONCE(dev->operstate);

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_TESTING ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev) && !netif_testing(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_TESTING:
		if (netif_oper_up(dev))
			operstate = IF_OPER_TESTING;
		break;

	case IF_OPER_DORMANT:
		if (netif_oper_up(dev))
			operstate = IF_OPER_DORMANT;
		break;
	}

	netdev_set_operstate(dev, operstate);
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
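
/*
 * Editorial worked example (assumed values, not part of this file): with
 * ifi_change = IFF_UP and ifi_flags = IFF_UP, only the IFF_UP bit is taken
 * from the request and every other flag keeps its current device value;
 * with ifi_change == 0 the whole ifi_flags word replaces the device flags,
 * matching the "change mask of zero means change everything" behaviour
 * noted in the comment above.
 */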

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_broadcast)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
			size += num_vfs *
				(nla_total_size(0) + /* nest IFLA_VF_STATS */
				 /* IFLA_VF_STATS_RX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_PACKETS */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_BYTES */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_BROADCAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_MULTICAST */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_RX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)) +
				 /* IFLA_VF_STATS_TX_DROPPED */
				 nla_total_size_64bit(sizeof(__u64)));
		}
		if (dev->netdev_ops->ndo_get_vf_guid)
			size += num_vfs * 2 *
				nla_total_size(sizeof(struct ifla_vf_guid));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PORT_VDP_REQUEST */
  871. + nla_total_size(2); /* PORT_VDP_RESPONSE */
  872. size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
  873. size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
  874. + port_size;
  875. size_t port_self_size = nla_total_size(sizeof(struct nlattr))
  876. + port_size;
  877. if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
  878. !(ext_filter_mask & RTEXT_FILTER_VF))
  879. return 0;
  880. if (dev_num_vf(dev->dev.parent))
  881. return port_self_size + vf_ports_size +
  882. vf_port_size * dev_num_vf(dev->dev.parent);
  883. else
  884. return port_self_size;
  885. }
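/* Size estimate for the IFLA_XDP nest: the attach-mode byte plus the
 * program-id attributes filled in by rtnl_xdp_fill() below.
 */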
  886. static size_t rtnl_xdp_size(void)
  887. {
  888. size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
  889. nla_total_size(1) + /* XDP_ATTACHED */
  890. nla_total_size(4) + /* XDP_PROG_ID (or 1st mode) */
  891. nla_total_size(4); /* XDP_<mode>_PROG_ID */
  892. return xdp_size;
  893. }
  894. static size_t rtnl_prop_list_size(const struct net_device *dev)
  895. {
  896. struct netdev_name_node *name_node;
  897. unsigned int cnt = 0;
  898. rcu_read_lock();
  899. list_for_each_entry_rcu(name_node, &dev->name_node->list, list)
  900. cnt++;
  901. rcu_read_unlock();
  902. if (!cnt)
  903. return 0;
  904. return nla_total_size(0) + cnt * nla_total_size(ALTIFNAMSIZ);
  905. }
  906. static size_t rtnl_proto_down_size(const struct net_device *dev)
  907. {
  908. size_t size = nla_total_size(1);
  909. /* Assume dev->proto_down_reason is not zero. */
  910. size += nla_total_size(0) + nla_total_size(4);
  911. return size;
  912. }
  913. static size_t rtnl_devlink_port_size(const struct net_device *dev)
  914. {
  915. size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
  916. if (dev->devlink_port)
  917. size += devlink_nl_port_handle_size(dev->devlink_port);
  918. return size;
  919. }
  920. static size_t rtnl_dpll_pin_size(const struct net_device *dev)
  921. {
  922. size_t size = nla_total_size(0); /* nest IFLA_DPLL_PIN */
  923. size += dpll_netdev_pin_handle_size(dev);
  924. return size;
  925. }
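/* Estimated upper bound of the RTM_NEWLINK payload for @dev, combining the
 * fixed-size IFLA_* attributes with the variable-size helpers above. The
 * result is used to size the skb before rtnl_fill_ifinfo() runs.
 */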
  926. static noinline size_t if_nlmsg_size(const struct net_device *dev,
  927. u32 ext_filter_mask)
  928. {
  929. return NLMSG_ALIGN(sizeof(struct ifinfomsg))
  930. + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
  931. + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
  932. + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
  933. + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
  934. + nla_total_size(sizeof(struct rtnl_link_stats))
  935. + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
  936. + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
  937. + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
  938. + nla_total_size(4) /* IFLA_TXQLEN */
  939. + nla_total_size(4) /* IFLA_WEIGHT */
  940. + nla_total_size(4) /* IFLA_MTU */
  941. + nla_total_size(4) /* IFLA_LINK */
  942. + nla_total_size(4) /* IFLA_MASTER */
  943. + nla_total_size(1) /* IFLA_CARRIER */
  944. + nla_total_size(4) /* IFLA_PROMISCUITY */
  945. + nla_total_size(4) /* IFLA_ALLMULTI */
  946. + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
  947. + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
  948. + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
  949. + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
  950. + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
  951. + nla_total_size(4) /* IFLA_GSO_IPV4_MAX_SIZE */
  952. + nla_total_size(4) /* IFLA_GRO_IPV4_MAX_SIZE */
  953. + nla_total_size(4) /* IFLA_TSO_MAX_SIZE */
  954. + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */
  955. + nla_total_size(1) /* IFLA_OPERSTATE */
  956. + nla_total_size(1) /* IFLA_LINKMODE */
  957. + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
  958. + nla_total_size(4) /* IFLA_LINK_NETNSID */
  959. + nla_total_size(4) /* IFLA_GROUP */
  960. + nla_total_size(ext_filter_mask
  961. & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
  962. + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
  963. + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
  964. + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
  965. + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
  966. + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
  967. + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
  968. + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
  969. + rtnl_xdp_size() /* IFLA_XDP */
  970. + nla_total_size(4) /* IFLA_EVENT */
  971. + nla_total_size(4) /* IFLA_NEW_NETNSID */
  972. + nla_total_size(4) /* IFLA_NEW_IFINDEX */
  973. + rtnl_proto_down_size(dev) /* proto down */
  974. + nla_total_size(4) /* IFLA_TARGET_NETNSID */
  975. + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
  976. + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
  977. + nla_total_size(4) /* IFLA_MIN_MTU */
  978. + nla_total_size(4) /* IFLA_MAX_MTU */
  979. + rtnl_prop_list_size(dev)
  980. + nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
  981. + rtnl_devlink_port_size(dev)
  982. + rtnl_dpll_pin_size(dev)
  983. + 0;
  984. }
  985. static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
  986. {
  987. struct nlattr *vf_ports;
  988. struct nlattr *vf_port;
  989. int vf;
  990. int err;
  991. vf_ports = nla_nest_start_noflag(skb, IFLA_VF_PORTS);
  992. if (!vf_ports)
  993. return -EMSGSIZE;
  994. for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
  995. vf_port = nla_nest_start_noflag(skb, IFLA_VF_PORT);
  996. if (!vf_port)
  997. goto nla_put_failure;
  998. if (nla_put_u32(skb, IFLA_PORT_VF, vf))
  999. goto nla_put_failure;
  1000. err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
  1001. if (err == -EMSGSIZE)
  1002. goto nla_put_failure;
  1003. if (err) {
  1004. nla_nest_cancel(skb, vf_port);
  1005. continue;
  1006. }
  1007. nla_nest_end(skb, vf_port);
  1008. }
  1009. nla_nest_end(skb, vf_ports);
  1010. return 0;
  1011. nla_put_failure:
  1012. nla_nest_cancel(skb, vf_ports);
  1013. return -EMSGSIZE;
  1014. }
  1015. static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
  1016. {
  1017. struct nlattr *port_self;
  1018. int err;
  1019. port_self = nla_nest_start_noflag(skb, IFLA_PORT_SELF);
  1020. if (!port_self)
  1021. return -EMSGSIZE;
  1022. err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
  1023. if (err) {
  1024. nla_nest_cancel(skb, port_self);
  1025. return (err == -EMSGSIZE) ? err : 0;
  1026. }
  1027. nla_nest_end(skb, port_self);
  1028. return 0;
  1029. }
  1030. static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
  1031. u32 ext_filter_mask)
  1032. {
  1033. int err;
  1034. if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
  1035. !(ext_filter_mask & RTEXT_FILTER_VF))
  1036. return 0;
  1037. err = rtnl_port_self_fill(skb, dev);
  1038. if (err)
  1039. return err;
  1040. if (dev_num_vf(dev->dev.parent)) {
  1041. err = rtnl_vf_ports_fill(skb, dev);
  1042. if (err)
  1043. return err;
  1044. }
  1045. return 0;
  1046. }
  1047. static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
  1048. {
  1049. int err;
  1050. struct netdev_phys_item_id ppid;
  1051. err = dev_get_phys_port_id(dev, &ppid);
  1052. if (err) {
  1053. if (err == -EOPNOTSUPP)
  1054. return 0;
  1055. return err;
  1056. }
  1057. if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
  1058. return -EMSGSIZE;
  1059. return 0;
  1060. }
  1061. static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
  1062. {
  1063. char name[IFNAMSIZ];
  1064. int err;
  1065. err = dev_get_phys_port_name(dev, name, sizeof(name));
  1066. if (err) {
  1067. if (err == -EOPNOTSUPP)
  1068. return 0;
  1069. return err;
  1070. }
  1071. if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
  1072. return -EMSGSIZE;
  1073. return 0;
  1074. }
  1075. static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
  1076. {
  1077. struct netdev_phys_item_id ppid = { };
  1078. int err;
  1079. err = dev_get_port_parent_id(dev, &ppid, false);
  1080. if (err) {
  1081. if (err == -EOPNOTSUPP)
  1082. return 0;
  1083. return err;
  1084. }
  1085. if (nla_put(skb, IFLA_PHYS_SWITCH_ID, ppid.id_len, ppid.id))
  1086. return -EMSGSIZE;
  1087. return 0;
  1088. }
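/* Reserve IFLA_STATS64, fill it via dev_get_stats(), then derive the
 * legacy 32-bit IFLA_STATS attribute from the same snapshot using
 * copy_rtnl_link_stats() so both views stay consistent.
 */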
  1089. static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
  1090. struct net_device *dev)
  1091. {
  1092. struct rtnl_link_stats64 *sp;
  1093. struct nlattr *attr;
  1094. attr = nla_reserve_64bit(skb, IFLA_STATS64,
  1095. sizeof(struct rtnl_link_stats64), IFLA_PAD);
  1096. if (!attr)
  1097. return -EMSGSIZE;
  1098. sp = nla_data(attr);
  1099. dev_get_stats(dev, sp);
  1100. attr = nla_reserve(skb, IFLA_STATS,
  1101. sizeof(struct rtnl_link_stats));
  1102. if (!attr)
  1103. return -EMSGSIZE;
  1104. copy_rtnl_link_stats(nla_data(attr), sp);
  1105. return 0;
  1106. }
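/* Emit one IFLA_VF_INFO nest for VF @vfs_num. Fields the driver does not
 * report keep their presets (-1 for spoofchk/RSS-query/trust, "auto" link
 * state, 802.1Q VLAN protocol); the stats nest is skipped when
 * RTEXT_FILTER_SKIP_STATS is set.
 */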
  1107. static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
  1108. struct net_device *dev,
  1109. int vfs_num,
  1110. u32 ext_filter_mask)
  1111. {
  1112. struct ifla_vf_rss_query_en vf_rss_query_en;
  1113. struct nlattr *vf, *vfstats, *vfvlanlist;
  1114. struct ifla_vf_link_state vf_linkstate;
  1115. struct ifla_vf_vlan_info vf_vlan_info;
  1116. struct ifla_vf_spoofchk vf_spoofchk;
  1117. struct ifla_vf_tx_rate vf_tx_rate;
  1118. struct ifla_vf_stats vf_stats;
  1119. struct ifla_vf_trust vf_trust;
  1120. struct ifla_vf_vlan vf_vlan;
  1121. struct ifla_vf_rate vf_rate;
  1122. struct ifla_vf_mac vf_mac;
  1123. struct ifla_vf_broadcast vf_broadcast;
  1124. struct ifla_vf_info ivi;
  1125. struct ifla_vf_guid node_guid;
  1126. struct ifla_vf_guid port_guid;
  1127. memset(&ivi, 0, sizeof(ivi));
1128. /* Not all SR-IOV capable drivers support the
1129. * spoofcheck and "RSS query enable" queries. Preset to
1130. * -1 so the user-space tool can detect that the driver
1131. * didn't report anything.
1132. */
  1133. ivi.spoofchk = -1;
  1134. ivi.rss_query_en = -1;
  1135. ivi.trusted = -1;
  1136. /* The default value for VF link state is "auto"
  1137. * IFLA_VF_LINK_STATE_AUTO which equals zero
  1138. */
  1139. ivi.linkstate = 0;
  1140. /* VLAN Protocol by default is 802.1Q */
  1141. ivi.vlan_proto = htons(ETH_P_8021Q);
  1142. if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
  1143. return 0;
  1144. memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
  1145. memset(&node_guid, 0, sizeof(node_guid));
  1146. memset(&port_guid, 0, sizeof(port_guid));
  1147. vf_mac.vf =
  1148. vf_vlan.vf =
  1149. vf_vlan_info.vf =
  1150. vf_rate.vf =
  1151. vf_tx_rate.vf =
  1152. vf_spoofchk.vf =
  1153. vf_linkstate.vf =
  1154. vf_rss_query_en.vf =
  1155. vf_trust.vf =
  1156. node_guid.vf =
  1157. port_guid.vf = ivi.vf;
  1158. memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
  1159. memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
  1160. vf_vlan.vlan = ivi.vlan;
  1161. vf_vlan.qos = ivi.qos;
  1162. vf_vlan_info.vlan = ivi.vlan;
  1163. vf_vlan_info.qos = ivi.qos;
  1164. vf_vlan_info.vlan_proto = ivi.vlan_proto;
  1165. vf_tx_rate.rate = ivi.max_tx_rate;
  1166. vf_rate.min_tx_rate = ivi.min_tx_rate;
  1167. vf_rate.max_tx_rate = ivi.max_tx_rate;
  1168. vf_spoofchk.setting = ivi.spoofchk;
  1169. vf_linkstate.link_state = ivi.linkstate;
  1170. vf_rss_query_en.setting = ivi.rss_query_en;
  1171. vf_trust.setting = ivi.trusted;
  1172. vf = nla_nest_start_noflag(skb, IFLA_VF_INFO);
  1173. if (!vf)
  1174. return -EMSGSIZE;
  1175. if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
  1176. nla_put(skb, IFLA_VF_BROADCAST, sizeof(vf_broadcast), &vf_broadcast) ||
  1177. nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
  1178. nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
  1179. &vf_rate) ||
  1180. nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
  1181. &vf_tx_rate) ||
  1182. nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
  1183. &vf_spoofchk) ||
  1184. nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
  1185. &vf_linkstate) ||
  1186. nla_put(skb, IFLA_VF_RSS_QUERY_EN,
  1187. sizeof(vf_rss_query_en),
  1188. &vf_rss_query_en) ||
  1189. nla_put(skb, IFLA_VF_TRUST,
  1190. sizeof(vf_trust), &vf_trust))
  1191. goto nla_put_vf_failure;
  1192. if (dev->netdev_ops->ndo_get_vf_guid &&
  1193. !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
  1194. &port_guid)) {
  1195. if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
  1196. &node_guid) ||
  1197. nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
  1198. &port_guid))
  1199. goto nla_put_vf_failure;
  1200. }
  1201. vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
  1202. if (!vfvlanlist)
  1203. goto nla_put_vf_failure;
  1204. if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
  1205. &vf_vlan_info)) {
  1206. nla_nest_cancel(skb, vfvlanlist);
  1207. goto nla_put_vf_failure;
  1208. }
  1209. nla_nest_end(skb, vfvlanlist);
  1210. if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
  1211. memset(&vf_stats, 0, sizeof(vf_stats));
  1212. if (dev->netdev_ops->ndo_get_vf_stats)
  1213. dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
  1214. &vf_stats);
  1215. vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
  1216. if (!vfstats)
  1217. goto nla_put_vf_failure;
  1218. if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
  1219. vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
  1220. nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
  1221. vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
  1222. nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
  1223. vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
  1224. nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
  1225. vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
  1226. nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
  1227. vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
  1228. nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
  1229. vf_stats.multicast, IFLA_VF_STATS_PAD) ||
  1230. nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
  1231. vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
  1232. nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
  1233. vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
  1234. nla_nest_cancel(skb, vfstats);
  1235. goto nla_put_vf_failure;
  1236. }
  1237. nla_nest_end(skb, vfstats);
  1238. }
  1239. nla_nest_end(skb, vf);
  1240. return 0;
  1241. nla_put_vf_failure:
  1242. nla_nest_cancel(skb, vf);
  1243. return -EMSGSIZE;
  1244. }
  1245. static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb,
  1246. struct net_device *dev,
  1247. u32 ext_filter_mask)
  1248. {
  1249. struct nlattr *vfinfo;
  1250. int i, num_vfs;
  1251. if (!dev->dev.parent || ((ext_filter_mask & RTEXT_FILTER_VF) == 0))
  1252. return 0;
  1253. num_vfs = dev_num_vf(dev->dev.parent);
  1254. if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs))
  1255. return -EMSGSIZE;
  1256. if (!dev->netdev_ops->ndo_get_vf_config)
  1257. return 0;
  1258. vfinfo = nla_nest_start_noflag(skb, IFLA_VFINFO_LIST);
  1259. if (!vfinfo)
  1260. return -EMSGSIZE;
  1261. for (i = 0; i < num_vfs; i++) {
  1262. if (rtnl_fill_vfinfo(skb, dev, i, ext_filter_mask)) {
  1263. nla_nest_cancel(skb, vfinfo);
  1264. return -EMSGSIZE;
  1265. }
  1266. }
  1267. nla_nest_end(skb, vfinfo);
  1268. return 0;
  1269. }
  1270. static int rtnl_fill_link_ifmap(struct sk_buff *skb,
  1271. const struct net_device *dev)
  1272. {
  1273. struct rtnl_link_ifmap map;
  1274. memset(&map, 0, sizeof(map));
  1275. map.mem_start = READ_ONCE(dev->mem_start);
  1276. map.mem_end = READ_ONCE(dev->mem_end);
  1277. map.base_addr = READ_ONCE(dev->base_addr);
  1278. map.irq = READ_ONCE(dev->irq);
  1279. map.dma = READ_ONCE(dev->dma);
  1280. map.port = READ_ONCE(dev->if_port);
  1281. if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
  1282. return -EMSGSIZE;
  1283. return 0;
  1284. }
  1285. static u32 rtnl_xdp_prog_skb(struct net_device *dev)
  1286. {
  1287. const struct bpf_prog *generic_xdp_prog;
  1288. u32 res = 0;
  1289. rcu_read_lock();
  1290. generic_xdp_prog = rcu_dereference(dev->xdp_prog);
  1291. if (generic_xdp_prog)
  1292. res = generic_xdp_prog->aux->id;
  1293. rcu_read_unlock();
  1294. return res;
  1295. }
  1296. static u32 rtnl_xdp_prog_drv(struct net_device *dev)
  1297. {
  1298. return dev_xdp_prog_id(dev, XDP_MODE_DRV);
  1299. }
  1300. static u32 rtnl_xdp_prog_hw(struct net_device *dev)
  1301. {
  1302. return dev_xdp_prog_id(dev, XDP_MODE_HW);
  1303. }
  1304. static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev,
  1305. u32 *prog_id, u8 *mode, u8 tgt_mode, u32 attr,
  1306. u32 (*get_prog_id)(struct net_device *dev))
  1307. {
  1308. u32 curr_id;
  1309. int err;
  1310. curr_id = get_prog_id(dev);
  1311. if (!curr_id)
  1312. return 0;
  1313. *prog_id = curr_id;
  1314. err = nla_put_u32(skb, attr, curr_id);
  1315. if (err)
  1316. return err;
  1317. if (*mode != XDP_ATTACHED_NONE)
  1318. *mode = XDP_ATTACHED_MULTI;
  1319. else
  1320. *mode = tgt_mode;
  1321. return 0;
  1322. }
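/* Report the XDP state of @dev: one *_PROG_ID attribute per attached mode
 * and IFLA_XDP_ATTACHED set to that mode, or XDP_ATTACHED_MULTI when more
 * than one program is attached, in which case the ambiguous
 * IFLA_XDP_PROG_ID summary attribute is omitted.
 */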
  1323. static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
  1324. {
  1325. struct nlattr *xdp;
  1326. u32 prog_id;
  1327. int err;
  1328. u8 mode;
  1329. xdp = nla_nest_start_noflag(skb, IFLA_XDP);
  1330. if (!xdp)
  1331. return -EMSGSIZE;
  1332. prog_id = 0;
  1333. mode = XDP_ATTACHED_NONE;
  1334. err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_SKB,
  1335. IFLA_XDP_SKB_PROG_ID, rtnl_xdp_prog_skb);
  1336. if (err)
  1337. goto err_cancel;
  1338. err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_DRV,
  1339. IFLA_XDP_DRV_PROG_ID, rtnl_xdp_prog_drv);
  1340. if (err)
  1341. goto err_cancel;
  1342. err = rtnl_xdp_report_one(skb, dev, &prog_id, &mode, XDP_ATTACHED_HW,
  1343. IFLA_XDP_HW_PROG_ID, rtnl_xdp_prog_hw);
  1344. if (err)
  1345. goto err_cancel;
  1346. err = nla_put_u8(skb, IFLA_XDP_ATTACHED, mode);
  1347. if (err)
  1348. goto err_cancel;
  1349. if (prog_id && mode != XDP_ATTACHED_MULTI) {
  1350. err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
  1351. if (err)
  1352. goto err_cancel;
  1353. }
  1354. nla_nest_end(skb, xdp);
  1355. return 0;
  1356. err_cancel:
  1357. nla_nest_cancel(skb, xdp);
  1358. return err;
  1359. }
  1360. static u32 rtnl_get_event(unsigned long event)
  1361. {
  1362. u32 rtnl_event_type = IFLA_EVENT_NONE;
  1363. switch (event) {
  1364. case NETDEV_REBOOT:
  1365. rtnl_event_type = IFLA_EVENT_REBOOT;
  1366. break;
  1367. case NETDEV_FEAT_CHANGE:
  1368. rtnl_event_type = IFLA_EVENT_FEATURES;
  1369. break;
  1370. case NETDEV_BONDING_FAILOVER:
  1371. rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
  1372. break;
  1373. case NETDEV_NOTIFY_PEERS:
  1374. rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
  1375. break;
  1376. case NETDEV_RESEND_IGMP:
  1377. rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
  1378. break;
  1379. case NETDEV_CHANGEINFODATA:
  1380. rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
  1381. break;
  1382. default:
  1383. break;
  1384. }
  1385. return rtnl_event_type;
  1386. }
  1387. static int put_master_ifindex(struct sk_buff *skb, struct net_device *dev)
  1388. {
  1389. const struct net_device *upper_dev;
  1390. int ret = 0;
  1391. rcu_read_lock();
  1392. upper_dev = netdev_master_upper_dev_get_rcu(dev);
  1393. if (upper_dev)
  1394. ret = nla_put_u32(skb, IFLA_MASTER,
  1395. READ_ONCE(upper_dev->ifindex));
  1396. rcu_read_unlock();
  1397. return ret;
  1398. }
  1399. static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev,
  1400. bool force)
  1401. {
  1402. int iflink = dev_get_iflink(dev);
  1403. if (force || READ_ONCE(dev->ifindex) != iflink)
  1404. return nla_put_u32(skb, IFLA_LINK, iflink);
  1405. return 0;
  1406. }
  1407. static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
  1408. struct net_device *dev)
  1409. {
  1410. char buf[IFALIASZ];
  1411. int ret;
  1412. ret = dev_get_alias(dev, buf, sizeof(buf));
  1413. return ret > 0 ? nla_put_string(skb, IFLA_IFALIAS, buf) : 0;
  1414. }
  1415. static int rtnl_fill_link_netnsid(struct sk_buff *skb,
  1416. const struct net_device *dev,
  1417. struct net *src_net, gfp_t gfp)
  1418. {
  1419. bool put_iflink = false;
  1420. if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) {
  1421. struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
  1422. if (!net_eq(dev_net(dev), link_net)) {
  1423. int id = peernet2id_alloc(src_net, link_net, gfp);
  1424. if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
  1425. return -EMSGSIZE;
  1426. put_iflink = true;
  1427. }
  1428. }
  1429. return nla_put_iflink(skb, dev, put_iflink);
  1430. }
  1431. static int rtnl_fill_link_af(struct sk_buff *skb,
  1432. const struct net_device *dev,
  1433. u32 ext_filter_mask)
  1434. {
  1435. const struct rtnl_af_ops *af_ops;
  1436. struct nlattr *af_spec;
  1437. af_spec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
  1438. if (!af_spec)
  1439. return -EMSGSIZE;
  1440. list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
  1441. struct nlattr *af;
  1442. int err;
  1443. if (!af_ops->fill_link_af)
  1444. continue;
  1445. af = nla_nest_start_noflag(skb, af_ops->family);
  1446. if (!af)
  1447. return -EMSGSIZE;
  1448. err = af_ops->fill_link_af(skb, dev, ext_filter_mask);
1449. /*
1450. * The fill_link_af() callback may return -ENODATA to indicate
1451. * that there was no data to be dumped. This is not an error;
1452. * it means we should trim the attribute header and
1453. * continue.
1454. */
  1455. if (err == -ENODATA)
  1456. nla_nest_cancel(skb, af);
  1457. else if (err < 0)
  1458. return -EMSGSIZE;
  1459. nla_nest_end(skb, af);
  1460. }
  1461. nla_nest_end(skb, af_spec);
  1462. return 0;
  1463. }
  1464. static int rtnl_fill_alt_ifnames(struct sk_buff *skb,
  1465. const struct net_device *dev)
  1466. {
  1467. struct netdev_name_node *name_node;
  1468. int count = 0;
  1469. list_for_each_entry_rcu(name_node, &dev->name_node->list, list) {
  1470. if (nla_put_string(skb, IFLA_ALT_IFNAME, name_node->name))
  1471. return -EMSGSIZE;
  1472. count++;
  1473. }
  1474. return count;
  1475. }
  1476. /* RCU protected. */
  1477. static int rtnl_fill_prop_list(struct sk_buff *skb,
  1478. const struct net_device *dev)
  1479. {
  1480. struct nlattr *prop_list;
  1481. int ret;
  1482. prop_list = nla_nest_start(skb, IFLA_PROP_LIST);
  1483. if (!prop_list)
  1484. return -EMSGSIZE;
  1485. ret = rtnl_fill_alt_ifnames(skb, dev);
  1486. if (ret <= 0)
  1487. goto nest_cancel;
  1488. nla_nest_end(skb, prop_list);
  1489. return 0;
  1490. nest_cancel:
  1491. nla_nest_cancel(skb, prop_list);
  1492. return ret;
  1493. }
  1494. static int rtnl_fill_proto_down(struct sk_buff *skb,
  1495. const struct net_device *dev)
  1496. {
  1497. struct nlattr *pr;
  1498. u32 preason;
  1499. if (nla_put_u8(skb, IFLA_PROTO_DOWN, READ_ONCE(dev->proto_down)))
  1500. goto nla_put_failure;
  1501. preason = READ_ONCE(dev->proto_down_reason);
  1502. if (!preason)
  1503. return 0;
  1504. pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON);
  1505. if (!pr)
  1506. return -EMSGSIZE;
  1507. if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) {
  1508. nla_nest_cancel(skb, pr);
  1509. goto nla_put_failure;
  1510. }
  1511. nla_nest_end(skb, pr);
  1512. return 0;
  1513. nla_put_failure:
  1514. return -EMSGSIZE;
  1515. }
  1516. static int rtnl_fill_devlink_port(struct sk_buff *skb,
  1517. const struct net_device *dev)
  1518. {
  1519. struct nlattr *devlink_port_nest;
  1520. int ret;
  1521. devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
  1522. if (!devlink_port_nest)
  1523. return -EMSGSIZE;
  1524. if (dev->devlink_port) {
  1525. ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
  1526. if (ret < 0)
  1527. goto nest_cancel;
  1528. }
  1529. nla_nest_end(skb, devlink_port_nest);
  1530. return 0;
  1531. nest_cancel:
  1532. nla_nest_cancel(skb, devlink_port_nest);
  1533. return ret;
  1534. }
  1535. static int rtnl_fill_dpll_pin(struct sk_buff *skb,
  1536. const struct net_device *dev)
  1537. {
  1538. struct nlattr *dpll_pin_nest;
  1539. int ret;
  1540. dpll_pin_nest = nla_nest_start(skb, IFLA_DPLL_PIN);
  1541. if (!dpll_pin_nest)
  1542. return -EMSGSIZE;
  1543. ret = dpll_netdev_add_pin_handle(skb, dev);
  1544. if (ret < 0)
  1545. goto nest_cancel;
  1546. nla_nest_end(skb, dpll_pin_nest);
  1547. return 0;
  1548. nest_cancel:
  1549. nla_nest_cancel(skb, dpll_pin_nest);
  1550. return ret;
  1551. }
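/* Central fill routine for link messages: writes the ifinfomsg header and
 * every IFLA_* attribute for @dev, taking rcu_read_lock() around the
 * netnsid, qdisc, AF_SPEC, ifmap and property-list sections. On overflow
 * the partially built message is cancelled and -EMSGSIZE is returned.
 */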
  1552. static int rtnl_fill_ifinfo(struct sk_buff *skb,
  1553. struct net_device *dev, struct net *src_net,
  1554. int type, u32 pid, u32 seq, u32 change,
  1555. unsigned int flags, u32 ext_filter_mask,
  1556. u32 event, int *new_nsid, int new_ifindex,
  1557. int tgt_netnsid, gfp_t gfp)
  1558. {
  1559. char devname[IFNAMSIZ];
  1560. struct ifinfomsg *ifm;
  1561. struct nlmsghdr *nlh;
  1562. struct Qdisc *qdisc;
  1563. ASSERT_RTNL();
  1564. nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
  1565. if (nlh == NULL)
  1566. return -EMSGSIZE;
  1567. ifm = nlmsg_data(nlh);
  1568. ifm->ifi_family = AF_UNSPEC;
  1569. ifm->__ifi_pad = 0;
  1570. ifm->ifi_type = READ_ONCE(dev->type);
  1571. ifm->ifi_index = READ_ONCE(dev->ifindex);
  1572. ifm->ifi_flags = dev_get_flags(dev);
  1573. ifm->ifi_change = change;
  1574. if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
  1575. goto nla_put_failure;
  1576. netdev_copy_name(dev, devname);
  1577. if (nla_put_string(skb, IFLA_IFNAME, devname))
  1578. goto nla_put_failure;
  1579. if (nla_put_u32(skb, IFLA_TXQLEN, READ_ONCE(dev->tx_queue_len)) ||
  1580. nla_put_u8(skb, IFLA_OPERSTATE,
  1581. netif_running(dev) ? READ_ONCE(dev->operstate) :
  1582. IF_OPER_DOWN) ||
  1583. nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) ||
  1584. nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
  1585. nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) ||
  1586. nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) ||
  1587. nla_put_u32(skb, IFLA_GROUP, READ_ONCE(dev->group)) ||
  1588. nla_put_u32(skb, IFLA_PROMISCUITY, READ_ONCE(dev->promiscuity)) ||
  1589. nla_put_u32(skb, IFLA_ALLMULTI, READ_ONCE(dev->allmulti)) ||
  1590. nla_put_u32(skb, IFLA_NUM_TX_QUEUES,
  1591. READ_ONCE(dev->num_tx_queues)) ||
  1592. nla_put_u32(skb, IFLA_GSO_MAX_SEGS,
  1593. READ_ONCE(dev->gso_max_segs)) ||
  1594. nla_put_u32(skb, IFLA_GSO_MAX_SIZE,
  1595. READ_ONCE(dev->gso_max_size)) ||
  1596. nla_put_u32(skb, IFLA_GRO_MAX_SIZE,
  1597. READ_ONCE(dev->gro_max_size)) ||
  1598. nla_put_u32(skb, IFLA_GSO_IPV4_MAX_SIZE,
  1599. READ_ONCE(dev->gso_ipv4_max_size)) ||
  1600. nla_put_u32(skb, IFLA_GRO_IPV4_MAX_SIZE,
  1601. READ_ONCE(dev->gro_ipv4_max_size)) ||
  1602. nla_put_u32(skb, IFLA_TSO_MAX_SIZE,
  1603. READ_ONCE(dev->tso_max_size)) ||
  1604. nla_put_u32(skb, IFLA_TSO_MAX_SEGS,
  1605. READ_ONCE(dev->tso_max_segs)) ||
  1606. #ifdef CONFIG_RPS
  1607. nla_put_u32(skb, IFLA_NUM_RX_QUEUES,
  1608. READ_ONCE(dev->num_rx_queues)) ||
  1609. #endif
  1610. put_master_ifindex(skb, dev) ||
  1611. nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
  1612. nla_put_ifalias(skb, dev) ||
  1613. nla_put_u32(skb, IFLA_CARRIER_CHANGES,
  1614. atomic_read(&dev->carrier_up_count) +
  1615. atomic_read(&dev->carrier_down_count)) ||
  1616. nla_put_u32(skb, IFLA_CARRIER_UP_COUNT,
  1617. atomic_read(&dev->carrier_up_count)) ||
  1618. nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT,
  1619. atomic_read(&dev->carrier_down_count)))
  1620. goto nla_put_failure;
  1621. if (rtnl_fill_proto_down(skb, dev))
  1622. goto nla_put_failure;
  1623. if (event != IFLA_EVENT_NONE) {
  1624. if (nla_put_u32(skb, IFLA_EVENT, event))
  1625. goto nla_put_failure;
  1626. }
  1627. if (dev->addr_len) {
  1628. if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
  1629. nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
  1630. goto nla_put_failure;
  1631. }
  1632. if (rtnl_phys_port_id_fill(skb, dev))
  1633. goto nla_put_failure;
  1634. if (rtnl_phys_port_name_fill(skb, dev))
  1635. goto nla_put_failure;
  1636. if (rtnl_phys_switch_id_fill(skb, dev))
  1637. goto nla_put_failure;
  1638. if (rtnl_fill_stats(skb, dev))
  1639. goto nla_put_failure;
  1640. if (rtnl_fill_vf(skb, dev, ext_filter_mask))
  1641. goto nla_put_failure;
  1642. if (rtnl_port_fill(skb, dev, ext_filter_mask))
  1643. goto nla_put_failure;
  1644. if (rtnl_xdp_fill(skb, dev))
  1645. goto nla_put_failure;
  1646. if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
  1647. if (rtnl_link_fill(skb, dev) < 0)
  1648. goto nla_put_failure;
  1649. }
  1650. if (new_nsid &&
  1651. nla_put_s32(skb, IFLA_NEW_NETNSID, *new_nsid) < 0)
  1652. goto nla_put_failure;
  1653. if (new_ifindex &&
  1654. nla_put_s32(skb, IFLA_NEW_IFINDEX, new_ifindex) < 0)
  1655. goto nla_put_failure;
  1656. if (memchr_inv(dev->perm_addr, '\0', dev->addr_len) &&
  1657. nla_put(skb, IFLA_PERM_ADDRESS, dev->addr_len, dev->perm_addr))
  1658. goto nla_put_failure;
  1659. rcu_read_lock();
  1660. if (rtnl_fill_link_netnsid(skb, dev, src_net, GFP_ATOMIC))
  1661. goto nla_put_failure_rcu;
  1662. qdisc = rcu_dereference(dev->qdisc);
  1663. if (qdisc && nla_put_string(skb, IFLA_QDISC, qdisc->ops->id))
  1664. goto nla_put_failure_rcu;
  1665. if (rtnl_fill_link_af(skb, dev, ext_filter_mask))
  1666. goto nla_put_failure_rcu;
  1667. if (rtnl_fill_link_ifmap(skb, dev))
  1668. goto nla_put_failure_rcu;
  1669. if (rtnl_fill_prop_list(skb, dev))
  1670. goto nla_put_failure_rcu;
  1671. rcu_read_unlock();
  1672. if (dev->dev.parent &&
  1673. nla_put_string(skb, IFLA_PARENT_DEV_NAME,
  1674. dev_name(dev->dev.parent)))
  1675. goto nla_put_failure;
  1676. if (dev->dev.parent && dev->dev.parent->bus &&
  1677. nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME,
  1678. dev->dev.parent->bus->name))
  1679. goto nla_put_failure;
  1680. if (rtnl_fill_devlink_port(skb, dev))
  1681. goto nla_put_failure;
  1682. if (rtnl_fill_dpll_pin(skb, dev))
  1683. goto nla_put_failure;
  1684. nlmsg_end(skb, nlh);
  1685. return 0;
  1686. nla_put_failure_rcu:
  1687. rcu_read_unlock();
  1688. nla_put_failure:
  1689. nlmsg_cancel(skb, nlh);
  1690. return -EMSGSIZE;
  1691. }
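/* Netlink attribute policy used to validate IFLA_* attributes in link
 * requests. NLA_REJECT entries (e.g. IFLA_PERM_ADDRESS, IFLA_TSO_MAX_SIZE)
 * are attributes the kernel reports but does not accept from user space.
 */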
  1692. static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
  1693. [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
  1694. [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
  1695. [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
  1696. [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
  1697. [IFLA_MTU] = { .type = NLA_U32 },
  1698. [IFLA_LINK] = { .type = NLA_U32 },
  1699. [IFLA_MASTER] = { .type = NLA_U32 },
  1700. [IFLA_CARRIER] = { .type = NLA_U8 },
  1701. [IFLA_TXQLEN] = { .type = NLA_U32 },
  1702. [IFLA_WEIGHT] = { .type = NLA_U32 },
  1703. [IFLA_OPERSTATE] = { .type = NLA_U8 },
  1704. [IFLA_LINKMODE] = { .type = NLA_U8 },
  1705. [IFLA_LINKINFO] = { .type = NLA_NESTED },
  1706. [IFLA_NET_NS_PID] = { .type = NLA_U32 },
  1707. [IFLA_NET_NS_FD] = { .type = NLA_U32 },
  1708. /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
  1709. * allow 0-length string (needed to remove an alias).
  1710. */
  1711. [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1712. [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
  1713. [IFLA_VF_PORTS] = { .type = NLA_NESTED },
  1714. [IFLA_PORT_SELF] = { .type = NLA_NESTED },
  1715. [IFLA_AF_SPEC] = { .type = NLA_NESTED },
  1716. [IFLA_EXT_MASK] = { .type = NLA_U32 },
  1717. [IFLA_PROMISCUITY] = { .type = NLA_U32 },
  1718. [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
  1719. [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
  1720. [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 },
  1721. [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
  1722. [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
  1723. [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */
  1724. [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
  1725. [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
  1726. [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
  1727. [IFLA_XDP] = { .type = NLA_NESTED },
  1728. [IFLA_EVENT] = { .type = NLA_U32 },
  1729. [IFLA_GROUP] = { .type = NLA_U32 },
  1730. [IFLA_TARGET_NETNSID] = { .type = NLA_S32 },
  1731. [IFLA_CARRIER_UP_COUNT] = { .type = NLA_U32 },
  1732. [IFLA_CARRIER_DOWN_COUNT] = { .type = NLA_U32 },
  1733. [IFLA_MIN_MTU] = { .type = NLA_U32 },
  1734. [IFLA_MAX_MTU] = { .type = NLA_U32 },
  1735. [IFLA_PROP_LIST] = { .type = NLA_NESTED },
  1736. [IFLA_ALT_IFNAME] = { .type = NLA_STRING,
  1737. .len = ALTIFNAMSIZ - 1 },
  1738. [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT },
  1739. [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
  1740. [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
  1741. [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
  1742. [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
  1743. [IFLA_TSO_MAX_SIZE] = { .type = NLA_REJECT },
  1744. [IFLA_TSO_MAX_SEGS] = { .type = NLA_REJECT },
  1745. [IFLA_ALLMULTI] = { .type = NLA_REJECT },
  1746. [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1),
  1747. [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 },
  1748. };
  1749. static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
  1750. [IFLA_INFO_KIND] = { .type = NLA_STRING },
  1751. [IFLA_INFO_DATA] = { .type = NLA_NESTED },
  1752. [IFLA_INFO_SLAVE_KIND] = { .type = NLA_STRING },
  1753. [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
  1754. };
  1755. static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
  1756. [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
  1757. [IFLA_VF_BROADCAST] = { .type = NLA_REJECT },
  1758. [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
  1759. [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
  1760. [IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
  1761. [IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
  1762. [IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
  1763. [IFLA_VF_LINK_STATE] = { .len = sizeof(struct ifla_vf_link_state) },
  1764. [IFLA_VF_RSS_QUERY_EN] = { .len = sizeof(struct ifla_vf_rss_query_en) },
  1765. [IFLA_VF_STATS] = { .type = NLA_NESTED },
  1766. [IFLA_VF_TRUST] = { .len = sizeof(struct ifla_vf_trust) },
  1767. [IFLA_VF_IB_NODE_GUID] = { .len = sizeof(struct ifla_vf_guid) },
  1768. [IFLA_VF_IB_PORT_GUID] = { .len = sizeof(struct ifla_vf_guid) },
  1769. };
  1770. static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
  1771. [IFLA_PORT_VF] = { .type = NLA_U32 },
  1772. [IFLA_PORT_PROFILE] = { .type = NLA_STRING,
  1773. .len = PORT_PROFILE_MAX },
  1774. [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
  1775. .len = PORT_UUID_MAX },
  1776. [IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
  1777. .len = PORT_UUID_MAX },
  1778. [IFLA_PORT_REQUEST] = { .type = NLA_U8, },
  1779. [IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
  1780. /* Unused, but we need to keep it here since user space could
  1781. * fill it. It's also broken with regard to NLA_BINARY use in
  1782. * combination with structs.
  1783. */
  1784. [IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
  1785. .len = sizeof(struct ifla_port_vsi) },
  1786. };
  1787. static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
  1788. [IFLA_XDP_UNSPEC] = { .strict_start_type = IFLA_XDP_EXPECTED_FD },
  1789. [IFLA_XDP_FD] = { .type = NLA_S32 },
  1790. [IFLA_XDP_EXPECTED_FD] = { .type = NLA_S32 },
  1791. [IFLA_XDP_ATTACHED] = { .type = NLA_U8 },
  1792. [IFLA_XDP_FLAGS] = { .type = NLA_U32 },
  1793. [IFLA_XDP_PROG_ID] = { .type = NLA_U32 },
  1794. };
  1795. static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
  1796. {
  1797. const struct rtnl_link_ops *ops = NULL;
  1798. struct nlattr *linfo[IFLA_INFO_MAX + 1];
  1799. if (nla_parse_nested_deprecated(linfo, IFLA_INFO_MAX, nla, ifla_info_policy, NULL) < 0)
  1800. return NULL;
  1801. if (linfo[IFLA_INFO_KIND]) {
  1802. char kind[MODULE_NAME_LEN];
  1803. nla_strscpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
  1804. ops = rtnl_link_ops_get(kind);
  1805. }
  1806. return ops;
  1807. }
  1808. static bool link_master_filtered(struct net_device *dev, int master_idx)
  1809. {
  1810. struct net_device *master;
  1811. if (!master_idx)
  1812. return false;
  1813. master = netdev_master_upper_dev_get(dev);
1814. /* 0 already denotes that IFLA_MASTER wasn't passed, so another invalid
1815. * ifindex value (-1) is used to mean "no master".
1816. */
  1817. if (master_idx == -1)
  1818. return !!master;
  1819. if (!master || master->ifindex != master_idx)
  1820. return true;
  1821. return false;
  1822. }
  1823. static bool link_kind_filtered(const struct net_device *dev,
  1824. const struct rtnl_link_ops *kind_ops)
  1825. {
  1826. if (kind_ops && dev->rtnl_link_ops != kind_ops)
  1827. return true;
  1828. return false;
  1829. }
  1830. static bool link_dump_filtered(struct net_device *dev,
  1831. int master_idx,
  1832. const struct rtnl_link_ops *kind_ops)
  1833. {
  1834. if (link_master_filtered(dev, master_idx) ||
  1835. link_kind_filtered(dev, kind_ops))
  1836. return true;
  1837. return false;
  1838. }
  1839. /**
  1840. * rtnl_get_net_ns_capable - Get netns if sufficiently privileged.
  1841. * @sk: netlink socket
  1842. * @netnsid: network namespace identifier
  1843. *
  1844. * Returns the network namespace identified by netnsid on success or an error
  1845. * pointer on failure.
  1846. */
  1847. struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
  1848. {
  1849. struct net *net;
  1850. net = get_net_ns_by_id(sock_net(sk), netnsid);
  1851. if (!net)
  1852. return ERR_PTR(-EINVAL);
  1853. /* For now, the caller is required to have CAP_NET_ADMIN in
  1854. * the user namespace owning the target net ns.
  1855. */
  1856. if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
  1857. put_net(net);
  1858. return ERR_PTR(-EACCES);
  1859. }
  1860. return net;
  1861. }
  1862. EXPORT_SYMBOL_GPL(rtnl_get_net_ns_capable);
  1863. static int rtnl_valid_dump_ifinfo_req(const struct nlmsghdr *nlh,
  1864. bool strict_check, struct nlattr **tb,
  1865. struct netlink_ext_ack *extack)
  1866. {
  1867. int hdrlen;
  1868. if (strict_check) {
  1869. struct ifinfomsg *ifm;
  1870. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
  1871. NL_SET_ERR_MSG(extack, "Invalid header for link dump");
  1872. return -EINVAL;
  1873. }
  1874. ifm = nlmsg_data(nlh);
  1875. if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
  1876. ifm->ifi_change) {
  1877. NL_SET_ERR_MSG(extack, "Invalid values in header for link dump request");
  1878. return -EINVAL;
  1879. }
  1880. if (ifm->ifi_index) {
  1881. NL_SET_ERR_MSG(extack, "Filter by device index not supported for link dumps");
  1882. return -EINVAL;
  1883. }
  1884. return nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb,
  1885. IFLA_MAX, ifla_policy,
  1886. extack);
  1887. }
1888. /* A hack to preserve the kernel<->userspace interface.
1889. * The correct header is ifinfomsg, consistent with rtnl_getlink.
1890. * However, before Linux v3.9 the code here assumed rtgenmsg, and that is
1891. * what iproute2 < v3.9.0 used.
1892. * We can detect the old iproute2: even with the IFLA_EXT_MASK attribute
1893. * included, its netlink message is shorter than struct ifinfomsg.
1894. */
  1895. hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
  1896. sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
  1897. return nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy,
  1898. extack);
  1899. }
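/* RTM_GETLINK dump handler. The walk position is kept in cb->ctx
 * (ctx->ifindex) so the dump can resume across multiple skbs. Honours the
 * IFLA_TARGET_NETNSID, IFLA_EXT_MASK, IFLA_MASTER and IFLA_LINKINFO
 * attributes and sets NLM_F_DUMP_FILTERED when a filter is in effect.
 */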
  1900. static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
  1901. {
  1902. const struct rtnl_link_ops *kind_ops = NULL;
  1903. struct netlink_ext_ack *extack = cb->extack;
  1904. const struct nlmsghdr *nlh = cb->nlh;
  1905. struct net *net = sock_net(skb->sk);
  1906. unsigned int flags = NLM_F_MULTI;
  1907. struct nlattr *tb[IFLA_MAX+1];
  1908. struct {
  1909. unsigned long ifindex;
  1910. } *ctx = (void *)cb->ctx;
  1911. struct net *tgt_net = net;
  1912. u32 ext_filter_mask = 0;
  1913. struct net_device *dev;
  1914. int master_idx = 0;
  1915. int netnsid = -1;
  1916. int err, i;
  1917. err = rtnl_valid_dump_ifinfo_req(nlh, cb->strict_check, tb, extack);
  1918. if (err < 0) {
  1919. if (cb->strict_check)
  1920. return err;
  1921. goto walk_entries;
  1922. }
  1923. for (i = 0; i <= IFLA_MAX; ++i) {
  1924. if (!tb[i])
  1925. continue;
  1926. /* new attributes should only be added with strict checking */
  1927. switch (i) {
  1928. case IFLA_TARGET_NETNSID:
  1929. netnsid = nla_get_s32(tb[i]);
  1930. tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
  1931. if (IS_ERR(tgt_net)) {
  1932. NL_SET_ERR_MSG(extack, "Invalid target network namespace id");
  1933. return PTR_ERR(tgt_net);
  1934. }
  1935. break;
  1936. case IFLA_EXT_MASK:
  1937. ext_filter_mask = nla_get_u32(tb[i]);
  1938. break;
  1939. case IFLA_MASTER:
  1940. master_idx = nla_get_u32(tb[i]);
  1941. break;
  1942. case IFLA_LINKINFO:
  1943. kind_ops = linkinfo_to_kind_ops(tb[i]);
  1944. break;
  1945. default:
  1946. if (cb->strict_check) {
  1947. NL_SET_ERR_MSG(extack, "Unsupported attribute in link dump request");
  1948. return -EINVAL;
  1949. }
  1950. }
  1951. }
  1952. if (master_idx || kind_ops)
  1953. flags |= NLM_F_DUMP_FILTERED;
  1954. walk_entries:
  1955. err = 0;
  1956. for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
  1957. if (link_dump_filtered(dev, master_idx, kind_ops))
  1958. continue;
  1959. err = rtnl_fill_ifinfo(skb, dev, net, RTM_NEWLINK,
  1960. NETLINK_CB(cb->skb).portid,
  1961. nlh->nlmsg_seq, 0, flags,
  1962. ext_filter_mask, 0, NULL, 0,
  1963. netnsid, GFP_KERNEL);
  1964. if (err < 0)
  1965. break;
  1966. }
  1967. cb->seq = tgt_net->dev_base_seq;
  1968. nl_dump_check_consistent(cb, nlmsg_hdr(skb));
  1969. if (netnsid >= 0)
  1970. put_net(tgt_net);
  1971. return err;
  1972. }
  1973. int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
  1974. struct netlink_ext_ack *exterr)
  1975. {
  1976. const struct ifinfomsg *ifmp;
  1977. const struct nlattr *attrs;
  1978. size_t len;
  1979. ifmp = nla_data(nla_peer);
  1980. attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
  1981. len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
  1982. if (ifmp->ifi_index < 0) {
  1983. NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
  1984. "ifindex can't be negative");
  1985. return -EINVAL;
  1986. }
  1987. return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
  1988. exterr);
  1989. }
  1990. EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
  1991. struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
  1992. {
  1993. struct net *net;
  1994. /* Examine the link attributes and figure out which
  1995. * network namespace we are talking about.
  1996. */
  1997. if (tb[IFLA_NET_NS_PID])
  1998. net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
  1999. else if (tb[IFLA_NET_NS_FD])
  2000. net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
  2001. else
  2002. net = get_net(src_net);
  2003. return net;
  2004. }
  2005. EXPORT_SYMBOL(rtnl_link_get_net);
  2006. /* Figure out which network namespace we are talking about by
  2007. * examining the link attributes in the following order:
  2008. *
  2009. * 1. IFLA_NET_NS_PID
  2010. * 2. IFLA_NET_NS_FD
  2011. * 3. IFLA_TARGET_NETNSID
  2012. */
  2013. static struct net *rtnl_link_get_net_by_nlattr(struct net *src_net,
  2014. struct nlattr *tb[])
  2015. {
  2016. struct net *net;
  2017. if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD])
  2018. return rtnl_link_get_net(src_net, tb);
  2019. if (!tb[IFLA_TARGET_NETNSID])
  2020. return get_net(src_net);
  2021. net = get_net_ns_by_id(src_net, nla_get_u32(tb[IFLA_TARGET_NETNSID]));
  2022. if (!net)
  2023. return ERR_PTR(-EINVAL);
  2024. return net;
  2025. }
  2026. static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
  2027. struct net *src_net,
  2028. struct nlattr *tb[], int cap)
  2029. {
  2030. struct net *net;
  2031. net = rtnl_link_get_net_by_nlattr(src_net, tb);
  2032. if (IS_ERR(net))
  2033. return net;
  2034. if (!netlink_ns_capable(skb, net->user_ns, cap)) {
  2035. put_net(net);
  2036. return ERR_PTR(-EPERM);
  2037. }
  2038. return net;
  2039. }
  2040. /* Verify that rtnetlink requests do not pass additional properties
  2041. * potentially referring to different network namespaces.
  2042. */
  2043. static int rtnl_ensure_unique_netns(struct nlattr *tb[],
  2044. struct netlink_ext_ack *extack,
  2045. bool netns_id_only)
  2046. {
  2047. if (netns_id_only) {
  2048. if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
  2049. return 0;
  2050. NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
  2051. return -EOPNOTSUPP;
  2052. }
  2053. if (tb[IFLA_TARGET_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
  2054. goto invalid_attr;
  2055. if (tb[IFLA_NET_NS_PID] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_FD]))
  2056. goto invalid_attr;
  2057. if (tb[IFLA_NET_NS_FD] && (tb[IFLA_TARGET_NETNSID] || tb[IFLA_NET_NS_PID]))
  2058. goto invalid_attr;
  2059. return 0;
  2060. invalid_attr:
  2061. NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
  2062. return -EINVAL;
  2063. }
  2064. static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
  2065. int max_tx_rate)
  2066. {
  2067. const struct net_device_ops *ops = dev->netdev_ops;
  2068. if (!ops->ndo_set_vf_rate)
  2069. return -EOPNOTSUPP;
  2070. if (max_tx_rate && max_tx_rate < min_tx_rate)
  2071. return -EINVAL;
  2072. return ops->ndo_set_vf_rate(dev, vf, min_tx_rate, max_tx_rate);
  2073. }
  2074. static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
  2075. struct netlink_ext_ack *extack)
  2076. {
  2077. if (tb[IFLA_ADDRESS] &&
  2078. nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
  2079. return -EINVAL;
  2080. if (tb[IFLA_BROADCAST] &&
  2081. nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
  2082. return -EINVAL;
  2083. if (tb[IFLA_GSO_MAX_SIZE] &&
  2084. nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
  2085. NL_SET_ERR_MSG(extack, "too big gso_max_size");
  2086. return -EINVAL;
  2087. }
  2088. if (tb[IFLA_GSO_MAX_SEGS] &&
  2089. (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
  2090. nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
  2091. NL_SET_ERR_MSG(extack, "too big gso_max_segs");
  2092. return -EINVAL;
  2093. }
  2094. if (tb[IFLA_GRO_MAX_SIZE] &&
  2095. nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
  2096. NL_SET_ERR_MSG(extack, "too big gro_max_size");
  2097. return -EINVAL;
  2098. }
  2099. if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
  2100. nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
  2101. NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
  2102. return -EINVAL;
  2103. }
  2104. if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
  2105. nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
  2106. NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
  2107. return -EINVAL;
  2108. }
  2109. if (tb[IFLA_AF_SPEC]) {
  2110. struct nlattr *af;
  2111. int rem, err;
  2112. nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
  2113. const struct rtnl_af_ops *af_ops;
  2114. af_ops = rtnl_af_lookup(nla_type(af));
  2115. if (!af_ops)
  2116. return -EAFNOSUPPORT;
  2117. if (!af_ops->set_link_af)
  2118. return -EOPNOTSUPP;
  2119. if (af_ops->validate_link_af) {
  2120. err = af_ops->validate_link_af(dev, af, extack);
  2121. if (err < 0)
  2122. return err;
  2123. }
  2124. }
  2125. }
  2126. return 0;
  2127. }
  2128. static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
  2129. int guid_type)
  2130. {
  2131. const struct net_device_ops *ops = dev->netdev_ops;
  2132. return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
  2133. }
  2134. static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
  2135. {
  2136. if (dev->type != ARPHRD_INFINIBAND)
  2137. return -EOPNOTSUPP;
  2138. return handle_infiniband_guid(dev, ivt, guid_type);
  2139. }
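/* Apply the per-VF IFLA_VF_* attributes in @tb through the corresponding
 * ndo_set_vf_* driver callbacks. A missing callback yields -EOPNOTSUPP and
 * an out-of-range VF index yields -EINVAL; processing stops at the first
 * failure.
 */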
  2140. static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
  2141. {
  2142. const struct net_device_ops *ops = dev->netdev_ops;
  2143. int err = -EINVAL;
  2144. if (tb[IFLA_VF_MAC]) {
  2145. struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
  2146. if (ivm->vf >= INT_MAX)
  2147. return -EINVAL;
  2148. err = -EOPNOTSUPP;
  2149. if (ops->ndo_set_vf_mac)
  2150. err = ops->ndo_set_vf_mac(dev, ivm->vf,
  2151. ivm->mac);
  2152. if (err < 0)
  2153. return err;
  2154. }
  2155. if (tb[IFLA_VF_VLAN]) {
  2156. struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
  2157. if (ivv->vf >= INT_MAX)
  2158. return -EINVAL;
  2159. err = -EOPNOTSUPP;
  2160. if (ops->ndo_set_vf_vlan)
  2161. err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
  2162. ivv->qos,
  2163. htons(ETH_P_8021Q));
  2164. if (err < 0)
  2165. return err;
  2166. }
  2167. if (tb[IFLA_VF_VLAN_LIST]) {
  2168. struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
  2169. struct nlattr *attr;
  2170. int rem, len = 0;
  2171. err = -EOPNOTSUPP;
  2172. if (!ops->ndo_set_vf_vlan)
  2173. return err;
  2174. nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
  2175. if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
  2176. nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) {
  2177. return -EINVAL;
  2178. }
  2179. if (len >= MAX_VLAN_LIST_LEN)
  2180. return -EOPNOTSUPP;
  2181. ivvl[len] = nla_data(attr);
  2182. len++;
  2183. }
  2184. if (len == 0)
  2185. return -EINVAL;
  2186. if (ivvl[0]->vf >= INT_MAX)
  2187. return -EINVAL;
  2188. err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
  2189. ivvl[0]->qos, ivvl[0]->vlan_proto);
  2190. if (err < 0)
  2191. return err;
  2192. }
  2193. if (tb[IFLA_VF_TX_RATE]) {
  2194. struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
  2195. struct ifla_vf_info ivf;
  2196. if (ivt->vf >= INT_MAX)
  2197. return -EINVAL;
  2198. err = -EOPNOTSUPP;
  2199. if (ops->ndo_get_vf_config)
  2200. err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
  2201. if (err < 0)
  2202. return err;
  2203. err = rtnl_set_vf_rate(dev, ivt->vf,
  2204. ivf.min_tx_rate, ivt->rate);
  2205. if (err < 0)
  2206. return err;
  2207. }
  2208. if (tb[IFLA_VF_RATE]) {
  2209. struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
  2210. if (ivt->vf >= INT_MAX)
  2211. return -EINVAL;
  2212. err = rtnl_set_vf_rate(dev, ivt->vf,
  2213. ivt->min_tx_rate, ivt->max_tx_rate);
  2214. if (err < 0)
  2215. return err;
  2216. }
  2217. if (tb[IFLA_VF_SPOOFCHK]) {
  2218. struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
  2219. if (ivs->vf >= INT_MAX)
  2220. return -EINVAL;
  2221. err = -EOPNOTSUPP;
  2222. if (ops->ndo_set_vf_spoofchk)
  2223. err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
  2224. ivs->setting);
  2225. if (err < 0)
  2226. return err;
  2227. }
  2228. if (tb[IFLA_VF_LINK_STATE]) {
  2229. struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
  2230. if (ivl->vf >= INT_MAX)
  2231. return -EINVAL;
  2232. err = -EOPNOTSUPP;
  2233. if (ops->ndo_set_vf_link_state)
  2234. err = ops->ndo_set_vf_link_state(dev, ivl->vf,
  2235. ivl->link_state);
  2236. if (err < 0)
  2237. return err;
  2238. }
  2239. if (tb[IFLA_VF_RSS_QUERY_EN]) {
  2240. struct ifla_vf_rss_query_en *ivrssq_en;
  2241. err = -EOPNOTSUPP;
  2242. ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
  2243. if (ivrssq_en->vf >= INT_MAX)
  2244. return -EINVAL;
  2245. if (ops->ndo_set_vf_rss_query_en)
  2246. err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
  2247. ivrssq_en->setting);
  2248. if (err < 0)
  2249. return err;
  2250. }
  2251. if (tb[IFLA_VF_TRUST]) {
  2252. struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
  2253. if (ivt->vf >= INT_MAX)
  2254. return -EINVAL;
  2255. err = -EOPNOTSUPP;
  2256. if (ops->ndo_set_vf_trust)
  2257. err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
  2258. if (err < 0)
  2259. return err;
  2260. }
  2261. if (tb[IFLA_VF_IB_NODE_GUID]) {
  2262. struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
  2263. if (ivt->vf >= INT_MAX)
  2264. return -EINVAL;
  2265. if (!ops->ndo_set_vf_guid)
  2266. return -EOPNOTSUPP;
  2267. return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
  2268. }
  2269. if (tb[IFLA_VF_IB_PORT_GUID]) {
  2270. struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
  2271. if (ivt->vf >= INT_MAX)
  2272. return -EINVAL;
  2273. if (!ops->ndo_set_vf_guid)
  2274. return -EOPNOTSUPP;
  2275. return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
  2276. }
  2277. return err;
  2278. }
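/* Re-parent @dev to the upper device identified by @ifindex: release the
 * current master via ndo_del_slave (if any), then enslave to the new one
 * via ndo_add_slave. An @ifindex of 0 only releases the current master.
 */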
  2279. static int do_set_master(struct net_device *dev, int ifindex,
  2280. struct netlink_ext_ack *extack)
  2281. {
  2282. struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
  2283. const struct net_device_ops *ops;
  2284. int err;
  2285. if (upper_dev) {
  2286. if (upper_dev->ifindex == ifindex)
  2287. return 0;
  2288. ops = upper_dev->netdev_ops;
  2289. if (ops->ndo_del_slave) {
  2290. err = ops->ndo_del_slave(upper_dev, dev);
  2291. if (err)
  2292. return err;
  2293. } else {
  2294. return -EOPNOTSUPP;
  2295. }
  2296. }
  2297. if (ifindex) {
  2298. upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
  2299. if (!upper_dev)
  2300. return -EINVAL;
  2301. ops = upper_dev->netdev_ops;
  2302. if (ops->ndo_add_slave) {
  2303. err = ops->ndo_add_slave(upper_dev, dev, extack);
  2304. if (err)
  2305. return err;
  2306. } else {
  2307. return -EOPNOTSUPP;
  2308. }
  2309. }
  2310. return 0;
  2311. }
  2312. static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = {
  2313. [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 },
  2314. [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 },
  2315. };
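/* Parse the optional IFLA_PROTO_DOWN_REASON nest (value plus optional
 * mask) and record it before toggling protodown itself. Clearing
 * protodown is refused with -EBUSY while any reason bits remain set.
 */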
  2316. static int do_set_proto_down(struct net_device *dev,
  2317. struct nlattr *nl_proto_down,
  2318. struct nlattr *nl_proto_down_reason,
  2319. struct netlink_ext_ack *extack)
  2320. {
  2321. struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
  2322. unsigned long mask = 0;
  2323. u32 value;
  2324. bool proto_down;
  2325. int err;
  2326. if (!dev->change_proto_down) {
  2327. NL_SET_ERR_MSG(extack, "Protodown not supported by device");
  2328. return -EOPNOTSUPP;
  2329. }
  2330. if (nl_proto_down_reason) {
  2331. err = nla_parse_nested_deprecated(pdreason,
  2332. IFLA_PROTO_DOWN_REASON_MAX,
  2333. nl_proto_down_reason,
  2334. ifla_proto_down_reason_policy,
  2335. NULL);
  2336. if (err < 0)
  2337. return err;
  2338. if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) {
  2339. NL_SET_ERR_MSG(extack, "Invalid protodown reason value");
  2340. return -EINVAL;
  2341. }
  2342. value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]);
  2343. if (pdreason[IFLA_PROTO_DOWN_REASON_MASK])
  2344. mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]);
  2345. dev_change_proto_down_reason(dev, mask, value);
  2346. }
  2347. if (nl_proto_down) {
  2348. proto_down = nla_get_u8(nl_proto_down);
  2349. /* Don't turn off protodown if there are active reasons */
  2350. if (!proto_down && dev->proto_down_reason) {
  2351. NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons");
  2352. return -EBUSY;
  2353. }
  2354. err = dev_change_proto_down(dev,
  2355. proto_down);
  2356. if (err)
  2357. return err;
  2358. }
  2359. return 0;
  2360. }
  2361. #define DO_SETLINK_MODIFIED 0x01
  2362. /* notify flag means notify + modified. */
  2363. #define DO_SETLINK_NOTIFY 0x03
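/* Apply the attribute changes in @tb to an existing device. Each handled
 * attribute ORs DO_SETLINK_MODIFIED or DO_SETLINK_NOTIFY into status to
 * track whether the device changed and whether a full notification of
 * user space is needed.
 */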
  2364. static int do_setlink(const struct sk_buff *skb,
  2365. struct net_device *dev, struct ifinfomsg *ifm,
  2366. struct netlink_ext_ack *extack,
  2367. struct nlattr **tb, int status)
  2368. {
  2369. const struct net_device_ops *ops = dev->netdev_ops;
  2370. char ifname[IFNAMSIZ];
  2371. int err;
  2372. if (tb[IFLA_IFNAME])
  2373. nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
  2374. else
  2375. ifname[0] = '\0';
  2376. if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) {
  2377. const char *pat = ifname[0] ? ifname : NULL;
  2378. struct net *net;
  2379. int new_ifindex;
  2380. net = rtnl_link_get_net_capable(skb, dev_net(dev),
  2381. tb, CAP_NET_ADMIN);
  2382. if (IS_ERR(net)) {
  2383. err = PTR_ERR(net);
  2384. goto errout;
  2385. }
  2386. if (tb[IFLA_NEW_IFINDEX])
  2387. new_ifindex = nla_get_s32(tb[IFLA_NEW_IFINDEX]);
  2388. else
  2389. new_ifindex = 0;
  2390. err = __dev_change_net_namespace(dev, net, pat, new_ifindex);
  2391. put_net(net);
  2392. if (err)
  2393. goto errout;
  2394. status |= DO_SETLINK_MODIFIED;
  2395. }
  2396. if (tb[IFLA_MAP]) {
  2397. struct rtnl_link_ifmap *u_map;
  2398. struct ifmap k_map;
  2399. if (!ops->ndo_set_config) {
  2400. err = -EOPNOTSUPP;
  2401. goto errout;
  2402. }
  2403. if (!netif_device_present(dev)) {
  2404. err = -ENODEV;
  2405. goto errout;
  2406. }
  2407. u_map = nla_data(tb[IFLA_MAP]);
  2408. k_map.mem_start = (unsigned long) u_map->mem_start;
  2409. k_map.mem_end = (unsigned long) u_map->mem_end;
  2410. k_map.base_addr = (unsigned short) u_map->base_addr;
  2411. k_map.irq = (unsigned char) u_map->irq;
  2412. k_map.dma = (unsigned char) u_map->dma;
  2413. k_map.port = (unsigned char) u_map->port;
  2414. err = ops->ndo_set_config(dev, &k_map);
  2415. if (err < 0)
  2416. goto errout;
  2417. status |= DO_SETLINK_NOTIFY;
  2418. }
  2419. if (tb[IFLA_ADDRESS]) {
  2420. struct sockaddr *sa;
  2421. int len;
  2422. len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
  2423. sizeof(*sa));
  2424. sa = kmalloc(len, GFP_KERNEL);
  2425. if (!sa) {
  2426. err = -ENOMEM;
  2427. goto errout;
  2428. }
  2429. sa->sa_family = dev->type;
  2430. memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
  2431. dev->addr_len);
  2432. err = dev_set_mac_address_user(dev, sa, extack);
  2433. kfree(sa);
  2434. if (err)
  2435. goto errout;
  2436. status |= DO_SETLINK_MODIFIED;
  2437. }
  2438. if (tb[IFLA_MTU]) {
  2439. err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack);
  2440. if (err < 0)
  2441. goto errout;
  2442. status |= DO_SETLINK_MODIFIED;
  2443. }
  2444. if (tb[IFLA_GROUP]) {
  2445. dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
  2446. status |= DO_SETLINK_NOTIFY;
  2447. }
  2448. /*
  2449. * Interface selected by interface index but interface
  2450. * name provided implies that a name change has been
  2451. * requested.
  2452. */
  2453. if (ifm->ifi_index > 0 && ifname[0]) {
  2454. err = dev_change_name(dev, ifname);
  2455. if (err < 0)
  2456. goto errout;
  2457. status |= DO_SETLINK_MODIFIED;
  2458. }
  2459. if (tb[IFLA_IFALIAS]) {
  2460. err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
  2461. nla_len(tb[IFLA_IFALIAS]));
  2462. if (err < 0)
  2463. goto errout;
  2464. status |= DO_SETLINK_NOTIFY;
  2465. }
  2466. if (tb[IFLA_BROADCAST]) {
  2467. nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
  2468. call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
  2469. }
  2470. if (ifm->ifi_flags || ifm->ifi_change) {
  2471. err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
  2472. extack);
  2473. if (err < 0)
  2474. goto errout;
  2475. }
  2476. if (tb[IFLA_MASTER]) {
  2477. err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
  2478. if (err)
  2479. goto errout;
  2480. status |= DO_SETLINK_MODIFIED;
  2481. }
  2482. if (tb[IFLA_CARRIER]) {
  2483. err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
  2484. if (err)
  2485. goto errout;
  2486. status |= DO_SETLINK_MODIFIED;
  2487. }
  2488. if (tb[IFLA_TXQLEN]) {
  2489. unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
  2490. err = dev_change_tx_queue_len(dev, value);
  2491. if (err)
  2492. goto errout;
  2493. status |= DO_SETLINK_MODIFIED;
  2494. }
  2495. if (tb[IFLA_GSO_MAX_SIZE]) {
  2496. u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
  2497. if (dev->gso_max_size ^ max_size) {
  2498. netif_set_gso_max_size(dev, max_size);
  2499. status |= DO_SETLINK_MODIFIED;
  2500. }
  2501. }
  2502. if (tb[IFLA_GSO_MAX_SEGS]) {
  2503. u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
  2504. if (dev->gso_max_segs ^ max_segs) {
  2505. netif_set_gso_max_segs(dev, max_segs);
  2506. status |= DO_SETLINK_MODIFIED;
  2507. }
  2508. }
  2509. if (tb[IFLA_GRO_MAX_SIZE]) {
  2510. u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
  2511. if (dev->gro_max_size ^ gro_max_size) {
  2512. netif_set_gro_max_size(dev, gro_max_size);
  2513. status |= DO_SETLINK_MODIFIED;
  2514. }
  2515. }
  2516. if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
  2517. u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
  2518. if (dev->gso_ipv4_max_size ^ max_size) {
  2519. netif_set_gso_ipv4_max_size(dev, max_size);
  2520. status |= DO_SETLINK_MODIFIED;
  2521. }
  2522. }
  2523. if (tb[IFLA_GRO_IPV4_MAX_SIZE]) {
  2524. u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]);
  2525. if (dev->gro_ipv4_max_size ^ gro_max_size) {
  2526. netif_set_gro_ipv4_max_size(dev, gro_max_size);
  2527. status |= DO_SETLINK_MODIFIED;
  2528. }
  2529. }
  2530. if (tb[IFLA_OPERSTATE])
  2531. set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
  2532. if (tb[IFLA_LINKMODE]) {
  2533. unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
  2534. if (dev->link_mode ^ value)
  2535. status |= DO_SETLINK_NOTIFY;
  2536. WRITE_ONCE(dev->link_mode, value);
  2537. }
  2538. if (tb[IFLA_VFINFO_LIST]) {
  2539. struct nlattr *vfinfo[IFLA_VF_MAX + 1];
  2540. struct nlattr *attr;
  2541. int rem;
  2542. nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
  2543. if (nla_type(attr) != IFLA_VF_INFO ||
  2544. nla_len(attr) < NLA_HDRLEN) {
  2545. err = -EINVAL;
  2546. goto errout;
  2547. }
  2548. err = nla_parse_nested_deprecated(vfinfo, IFLA_VF_MAX,
  2549. attr,
  2550. ifla_vf_policy,
  2551. NULL);
  2552. if (err < 0)
  2553. goto errout;
  2554. err = do_setvfinfo(dev, vfinfo);
  2555. if (err < 0)
  2556. goto errout;
  2557. status |= DO_SETLINK_NOTIFY;
  2558. }
  2559. }
  2560. err = 0;
  2561. if (tb[IFLA_VF_PORTS]) {
  2562. struct nlattr *port[IFLA_PORT_MAX+1];
  2563. struct nlattr *attr;
  2564. int vf;
  2565. int rem;
  2566. err = -EOPNOTSUPP;
  2567. if (!ops->ndo_set_vf_port)
  2568. goto errout;
  2569. nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
  2570. if (nla_type(attr) != IFLA_VF_PORT ||
  2571. nla_len(attr) < NLA_HDRLEN) {
  2572. err = -EINVAL;
  2573. goto errout;
  2574. }
  2575. err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
  2576. attr,
  2577. ifla_port_policy,
  2578. NULL);
  2579. if (err < 0)
  2580. goto errout;
  2581. if (!port[IFLA_PORT_VF]) {
  2582. err = -EOPNOTSUPP;
  2583. goto errout;
  2584. }
  2585. vf = nla_get_u32(port[IFLA_PORT_VF]);
  2586. err = ops->ndo_set_vf_port(dev, vf, port);
  2587. if (err < 0)
  2588. goto errout;
  2589. status |= DO_SETLINK_NOTIFY;
  2590. }
  2591. }
  2592. err = 0;
  2593. if (tb[IFLA_PORT_SELF]) {
  2594. struct nlattr *port[IFLA_PORT_MAX+1];
  2595. err = nla_parse_nested_deprecated(port, IFLA_PORT_MAX,
  2596. tb[IFLA_PORT_SELF],
  2597. ifla_port_policy, NULL);
  2598. if (err < 0)
  2599. goto errout;
  2600. err = -EOPNOTSUPP;
  2601. if (ops->ndo_set_vf_port)
  2602. err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
  2603. if (err < 0)
  2604. goto errout;
  2605. status |= DO_SETLINK_NOTIFY;
  2606. }
  2607. if (tb[IFLA_AF_SPEC]) {
  2608. struct nlattr *af;
  2609. int rem;
  2610. nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
  2611. const struct rtnl_af_ops *af_ops;
  2612. BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
  2613. err = af_ops->set_link_af(dev, af, extack);
  2614. if (err < 0)
  2615. goto errout;
  2616. status |= DO_SETLINK_NOTIFY;
  2617. }
  2618. }
  2619. err = 0;
  2620. if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) {
  2621. err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN],
  2622. tb[IFLA_PROTO_DOWN_REASON], extack);
  2623. if (err)
  2624. goto errout;
  2625. status |= DO_SETLINK_NOTIFY;
  2626. }
  2627. if (tb[IFLA_XDP]) {
  2628. struct nlattr *xdp[IFLA_XDP_MAX + 1];
  2629. u32 xdp_flags = 0;
  2630. err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
  2631. tb[IFLA_XDP],
  2632. ifla_xdp_policy, NULL);
  2633. if (err < 0)
  2634. goto errout;
  2635. if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
  2636. err = -EINVAL;
  2637. goto errout;
  2638. }
  2639. if (xdp[IFLA_XDP_FLAGS]) {
  2640. xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
  2641. if (xdp_flags & ~XDP_FLAGS_MASK) {
  2642. err = -EINVAL;
  2643. goto errout;
  2644. }
  2645. if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
  2646. err = -EINVAL;
  2647. goto errout;
  2648. }
  2649. }
  2650. if (xdp[IFLA_XDP_FD]) {
  2651. int expected_fd = -1;
  2652. if (xdp_flags & XDP_FLAGS_REPLACE) {
  2653. if (!xdp[IFLA_XDP_EXPECTED_FD]) {
  2654. err = -EINVAL;
  2655. goto errout;
  2656. }
  2657. expected_fd =
  2658. nla_get_s32(xdp[IFLA_XDP_EXPECTED_FD]);
  2659. }
  2660. err = dev_change_xdp_fd(dev, extack,
  2661. nla_get_s32(xdp[IFLA_XDP_FD]),
  2662. expected_fd,
  2663. xdp_flags);
  2664. if (err)
  2665. goto errout;
  2666. status |= DO_SETLINK_NOTIFY;
  2667. }
  2668. }
  2669. errout:
  2670. if (status & DO_SETLINK_MODIFIED) {
  2671. if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
  2672. netdev_state_change(dev);
  2673. if (err < 0)
  2674. net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
  2675. dev->name);
  2676. }
  2677. return err;
  2678. }
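/* Look up a device by IFLA_IFNAME or IFLA_ALT_IFNAME; returns NULL when
 * neither attribute is present or no such device exists in @net.
 */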
static struct net_device *rtnl_dev_get(struct net *net,
				       struct nlattr *tb[])
{
	char ifname[ALTIFNAMSIZ];

	if (tb[IFLA_IFNAME])
		nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else if (tb[IFLA_ALT_IFNAME])
		nla_strscpy(ifname, tb[IFLA_ALT_IFNAME], ALTIFNAMSIZ);
	else
		return NULL;

	return __dev_get_by_name(net, ifname);
}
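/* RTM_SETLINK handler: parse the request, locate the target device by
 * ifindex or name, validate the attributes and hand off to do_setlink().
 */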
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		goto errout;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = validate_linkmsg(dev, tb, extack);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, extack, tb, 0);
errout:
	return err;
}
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}
int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many_notify(&list_kill, portid, nlh);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);
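/* RTM_DELLINK handler: the target may be selected by ifindex, by name
 * (including an alternative name) or, as a fallback, by IFLA_GROUP.
 */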
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	u32 portid = NETLINK_CB(skb).portid;
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	struct nlattr *tb[IFLA_MAX+1];
	int err;
	int netnsid = -1;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
				     ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_TARGET_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(tgt_net, tb);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;
		goto out;
	}

	err = rtnl_delete_link(dev, portid, nlh);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
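/* Apply the requested flags to a newly created link and send the initial
 * notification; a link that was still initializing is announced in full.
 */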
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
			u32 portid, const struct nlmsghdr *nlh)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
					 NULL);
		if (err < 0)
			return err;
	}

	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
	} else {
		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
		__dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
	}
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
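/* rtnl_create_link - allocate a net_device for RTM_NEWLINK.
 * Queue counts come from the attributes or the ops' defaults (both are
 * bounded to 1..4096 below), and the common IFLA_* attributes such as
 * MTU, address, operstate and the GSO/GRO limits are applied before the
 * device is handed back to the caller for registration.
 */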
  2831. struct net_device *rtnl_create_link(struct net *net, const char *ifname,
  2832. unsigned char name_assign_type,
  2833. const struct rtnl_link_ops *ops,
  2834. struct nlattr *tb[],
  2835. struct netlink_ext_ack *extack)
  2836. {
  2837. struct net_device *dev;
  2838. unsigned int num_tx_queues = 1;
  2839. unsigned int num_rx_queues = 1;
  2840. int err;
  2841. if (tb[IFLA_NUM_TX_QUEUES])
  2842. num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
  2843. else if (ops->get_num_tx_queues)
  2844. num_tx_queues = ops->get_num_tx_queues();
  2845. if (tb[IFLA_NUM_RX_QUEUES])
  2846. num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
  2847. else if (ops->get_num_rx_queues)
  2848. num_rx_queues = ops->get_num_rx_queues();
  2849. if (num_tx_queues < 1 || num_tx_queues > 4096) {
  2850. NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
  2851. return ERR_PTR(-EINVAL);
  2852. }
  2853. if (num_rx_queues < 1 || num_rx_queues > 4096) {
  2854. NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
  2855. return ERR_PTR(-EINVAL);
  2856. }
  2857. if (ops->alloc) {
  2858. dev = ops->alloc(tb, ifname, name_assign_type,
  2859. num_tx_queues, num_rx_queues);
  2860. if (IS_ERR(dev))
  2861. return dev;
  2862. } else {
  2863. dev = alloc_netdev_mqs(ops->priv_size, ifname,
  2864. name_assign_type, ops->setup,
  2865. num_tx_queues, num_rx_queues);
  2866. }
  2867. if (!dev)
  2868. return ERR_PTR(-ENOMEM);
  2869. err = validate_linkmsg(dev, tb, extack);
  2870. if (err < 0) {
  2871. free_netdev(dev);
  2872. return ERR_PTR(err);
  2873. }
  2874. dev_net_set(dev, net);
  2875. dev->rtnl_link_ops = ops;
  2876. dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
  2877. if (tb[IFLA_MTU]) {
  2878. u32 mtu = nla_get_u32(tb[IFLA_MTU]);
  2879. err = dev_validate_mtu(dev, mtu, extack);
  2880. if (err) {
  2881. free_netdev(dev);
  2882. return ERR_PTR(err);
  2883. }
  2884. dev->mtu = mtu;
  2885. }
  2886. if (tb[IFLA_ADDRESS]) {
  2887. __dev_addr_set(dev, nla_data(tb[IFLA_ADDRESS]),
  2888. nla_len(tb[IFLA_ADDRESS]));
  2889. dev->addr_assign_type = NET_ADDR_SET;
  2890. }
  2891. if (tb[IFLA_BROADCAST])
  2892. memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
  2893. nla_len(tb[IFLA_BROADCAST]));
  2894. if (tb[IFLA_TXQLEN])
  2895. dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
  2896. if (tb[IFLA_OPERSTATE])
  2897. set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
  2898. if (tb[IFLA_LINKMODE])
  2899. dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
  2900. if (tb[IFLA_GROUP])
  2901. dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
  2902. if (tb[IFLA_GSO_MAX_SIZE])
  2903. netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
  2904. if (tb[IFLA_GSO_MAX_SEGS])
  2905. netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
  2906. if (tb[IFLA_GRO_MAX_SIZE])
  2907. netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
  2908. if (tb[IFLA_GSO_IPV4_MAX_SIZE])
  2909. netif_set_gso_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]));
  2910. if (tb[IFLA_GRO_IPV4_MAX_SIZE])
  2911. netif_set_gro_ipv4_max_size(dev, nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]));
  2912. return dev;
  2913. }
  2914. EXPORT_SYMBOL(rtnl_create_link);
static int rtnl_group_changelink(const struct sk_buff *skb,
		struct net *net, int group,
		struct ifinfomsg *ifm,
		struct netlink_ext_ack *extack,
		struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = validate_linkmsg(dev, tb, extack);
			if (err < 0)
				return err;
			err = do_setlink(skb, dev, ifm, extack, tb, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
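/* Create a new link for rtnl_newlink(): resolve the destination and
 * optional link netns, allocate the device via rtnl_create_link(), then
 * register it and apply IFLA_MASTER, unwinding on any failure.
 */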
  2935. static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
  2936. const struct rtnl_link_ops *ops,
  2937. const struct nlmsghdr *nlh,
  2938. struct nlattr **tb, struct nlattr **data,
  2939. struct netlink_ext_ack *extack)
  2940. {
  2941. unsigned char name_assign_type = NET_NAME_USER;
  2942. struct net *net = sock_net(skb->sk);
  2943. u32 portid = NETLINK_CB(skb).portid;
  2944. struct net *dest_net, *link_net;
  2945. struct net_device *dev;
  2946. char ifname[IFNAMSIZ];
  2947. int err;
  2948. if (!ops->alloc && !ops->setup)
  2949. return -EOPNOTSUPP;
  2950. if (tb[IFLA_IFNAME]) {
  2951. nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
  2952. } else {
  2953. snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
  2954. name_assign_type = NET_NAME_ENUM;
  2955. }
  2956. dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
  2957. if (IS_ERR(dest_net))
  2958. return PTR_ERR(dest_net);
  2959. if (tb[IFLA_LINK_NETNSID]) {
  2960. int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
  2961. link_net = get_net_ns_by_id(dest_net, id);
  2962. if (!link_net) {
  2963. NL_SET_ERR_MSG(extack, "Unknown network namespace id");
  2964. err = -EINVAL;
  2965. goto out;
  2966. }
  2967. err = -EPERM;
  2968. if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
  2969. goto out;
  2970. } else {
  2971. link_net = NULL;
  2972. }
  2973. dev = rtnl_create_link(link_net ? : dest_net, ifname,
  2974. name_assign_type, ops, tb, extack);
  2975. if (IS_ERR(dev)) {
  2976. err = PTR_ERR(dev);
  2977. goto out;
  2978. }
  2979. dev->ifindex = ifm->ifi_index;
  2980. if (ops->newlink)
  2981. err = ops->newlink(link_net ? : net, dev, tb, data, extack);
  2982. else
  2983. err = register_netdevice(dev);
  2984. if (err < 0) {
  2985. free_netdev(dev);
  2986. goto out;
  2987. }
  2988. err = rtnl_configure_link(dev, ifm, portid, nlh);
  2989. if (err < 0)
  2990. goto out_unregister;
  2991. if (link_net) {
  2992. err = dev_change_net_namespace(dev, dest_net, ifname);
  2993. if (err < 0)
  2994. goto out_unregister;
  2995. }
  2996. if (tb[IFLA_MASTER]) {
  2997. err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
  2998. if (err)
  2999. goto out_unregister;
  3000. }
  3001. out:
  3002. if (link_net)
  3003. put_net(link_net);
  3004. put_net(dest_net);
  3005. return err;
  3006. out_unregister:
  3007. if (ops->newlink) {
  3008. LIST_HEAD(list_kill);
  3009. ops->dellink(dev, &list_kill);
  3010. unregister_netdevice_many(&list_kill);
  3011. } else {
  3012. unregister_netdevice(dev);
  3013. }
  3014. goto out;
  3015. }
struct rtnl_newlink_tbs {
	struct nlattr *tb[IFLA_MAX + 1];
	struct nlattr *attr[RTNL_MAX_TYPE + 1];
	struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
};
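/* Core of RTM_NEWLINK: if the target device already exists this degrades
 * to a changelink/do_setlink() call, otherwise a new device is created,
 * loading the "rtnl-link-<kind>" module on demand when CONFIG_MODULES
 * is enabled.
 */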
  3021. static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
  3022. struct rtnl_newlink_tbs *tbs,
  3023. struct netlink_ext_ack *extack)
  3024. {
  3025. struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
  3026. struct nlattr ** const tb = tbs->tb;
  3027. const struct rtnl_link_ops *m_ops;
  3028. struct net_device *master_dev;
  3029. struct net *net = sock_net(skb->sk);
  3030. const struct rtnl_link_ops *ops;
  3031. struct nlattr **slave_data;
  3032. char kind[MODULE_NAME_LEN];
  3033. struct net_device *dev;
  3034. struct ifinfomsg *ifm;
  3035. struct nlattr **data;
  3036. bool link_specified;
  3037. int err;
  3038. #ifdef CONFIG_MODULES
  3039. replay:
  3040. #endif
  3041. err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
  3042. ifla_policy, extack);
  3043. if (err < 0)
  3044. return err;
  3045. err = rtnl_ensure_unique_netns(tb, extack, false);
  3046. if (err < 0)
  3047. return err;
  3048. ifm = nlmsg_data(nlh);
  3049. if (ifm->ifi_index > 0) {
  3050. link_specified = true;
  3051. dev = __dev_get_by_index(net, ifm->ifi_index);
  3052. } else if (ifm->ifi_index < 0) {
  3053. NL_SET_ERR_MSG(extack, "ifindex can't be negative");
  3054. return -EINVAL;
  3055. } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
  3056. link_specified = true;
  3057. dev = rtnl_dev_get(net, tb);
  3058. } else {
  3059. link_specified = false;
  3060. dev = NULL;
  3061. }
  3062. master_dev = NULL;
  3063. m_ops = NULL;
  3064. if (dev) {
  3065. master_dev = netdev_master_upper_dev_get(dev);
  3066. if (master_dev)
  3067. m_ops = master_dev->rtnl_link_ops;
  3068. }
  3069. if (tb[IFLA_LINKINFO]) {
  3070. err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX,
  3071. tb[IFLA_LINKINFO],
  3072. ifla_info_policy, NULL);
  3073. if (err < 0)
  3074. return err;
  3075. } else
  3076. memset(linkinfo, 0, sizeof(linkinfo));
  3077. if (linkinfo[IFLA_INFO_KIND]) {
  3078. nla_strscpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
  3079. ops = rtnl_link_ops_get(kind);
  3080. } else {
  3081. kind[0] = '\0';
  3082. ops = NULL;
  3083. }
  3084. data = NULL;
  3085. if (ops) {
  3086. if (ops->maxtype > RTNL_MAX_TYPE)
  3087. return -EINVAL;
  3088. if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
  3089. err = nla_parse_nested_deprecated(tbs->attr, ops->maxtype,
  3090. linkinfo[IFLA_INFO_DATA],
  3091. ops->policy, extack);
  3092. if (err < 0)
  3093. return err;
  3094. data = tbs->attr;
  3095. }
  3096. if (ops->validate) {
  3097. err = ops->validate(tb, data, extack);
  3098. if (err < 0)
  3099. return err;
  3100. }
  3101. }
  3102. slave_data = NULL;
  3103. if (m_ops) {
  3104. if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
  3105. return -EINVAL;
  3106. if (m_ops->slave_maxtype &&
  3107. linkinfo[IFLA_INFO_SLAVE_DATA]) {
  3108. err = nla_parse_nested_deprecated(tbs->slave_attr,
  3109. m_ops->slave_maxtype,
  3110. linkinfo[IFLA_INFO_SLAVE_DATA],
  3111. m_ops->slave_policy,
  3112. extack);
  3113. if (err < 0)
  3114. return err;
  3115. slave_data = tbs->slave_attr;
  3116. }
  3117. }
  3118. if (dev) {
  3119. int status = 0;
  3120. if (nlh->nlmsg_flags & NLM_F_EXCL)
  3121. return -EEXIST;
  3122. if (nlh->nlmsg_flags & NLM_F_REPLACE)
  3123. return -EOPNOTSUPP;
  3124. err = validate_linkmsg(dev, tb, extack);
  3125. if (err < 0)
  3126. return err;
  3127. if (linkinfo[IFLA_INFO_DATA]) {
  3128. if (!ops || ops != dev->rtnl_link_ops ||
  3129. !ops->changelink)
  3130. return -EOPNOTSUPP;
  3131. err = ops->changelink(dev, tb, data, extack);
  3132. if (err < 0)
  3133. return err;
  3134. status |= DO_SETLINK_NOTIFY;
  3135. }
  3136. if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
  3137. if (!m_ops || !m_ops->slave_changelink)
  3138. return -EOPNOTSUPP;
  3139. err = m_ops->slave_changelink(master_dev, dev, tb,
  3140. slave_data, extack);
  3141. if (err < 0)
  3142. return err;
  3143. status |= DO_SETLINK_NOTIFY;
  3144. }
  3145. return do_setlink(skb, dev, ifm, extack, tb, status);
  3146. }
  3147. if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
  3148. /* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
  3149. * or it's for a group
  3150. */
  3151. if (link_specified)
  3152. return -ENODEV;
  3153. if (tb[IFLA_GROUP])
  3154. return rtnl_group_changelink(skb, net,
  3155. nla_get_u32(tb[IFLA_GROUP]),
  3156. ifm, extack, tb);
  3157. return -ENODEV;
  3158. }
  3159. if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
  3160. return -EOPNOTSUPP;
  3161. if (!ops) {
  3162. #ifdef CONFIG_MODULES
  3163. if (kind[0]) {
  3164. __rtnl_unlock();
  3165. request_module("rtnl-link-%s", kind);
  3166. rtnl_lock();
  3167. ops = rtnl_link_ops_get(kind);
  3168. if (ops)
  3169. goto replay;
  3170. }
  3171. #endif
  3172. NL_SET_ERR_MSG(extack, "Unknown device type");
  3173. return -EOPNOTSUPP;
  3174. }
  3175. return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
  3176. }
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct rtnl_newlink_tbs *tbs;
	int ret;

	tbs = kmalloc(sizeof(*tbs), GFP_KERNEL);
	if (!tbs)
		return -ENOMEM;

	ret = __rtnl_newlink(skb, nlh, tbs, extack);
	kfree(tbs);
	return ret;
}
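/* Strict validation for RTM_GETLINK requests: with strict checking only
 * IFLA_IFNAME, IFLA_ALT_IFNAME, IFLA_EXT_MASK and IFLA_TARGET_NETNSID
 * are accepted and the header padding/type/flags must be zero.
 */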
  3189. static int rtnl_valid_getlink_req(struct sk_buff *skb,
  3190. const struct nlmsghdr *nlh,
  3191. struct nlattr **tb,
  3192. struct netlink_ext_ack *extack)
  3193. {
  3194. struct ifinfomsg *ifm;
  3195. int i, err;
  3196. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
  3197. NL_SET_ERR_MSG(extack, "Invalid header for get link");
  3198. return -EINVAL;
  3199. }
  3200. if (!netlink_strict_get_check(skb))
  3201. return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFLA_MAX,
  3202. ifla_policy, extack);
  3203. ifm = nlmsg_data(nlh);
  3204. if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
  3205. ifm->ifi_change) {
  3206. NL_SET_ERR_MSG(extack, "Invalid values in header for get link request");
  3207. return -EINVAL;
  3208. }
  3209. err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFLA_MAX,
  3210. ifla_policy, extack);
  3211. if (err)
  3212. return err;
  3213. for (i = 0; i <= IFLA_MAX; i++) {
  3214. if (!tb[i])
  3215. continue;
  3216. switch (i) {
  3217. case IFLA_IFNAME:
  3218. case IFLA_ALT_IFNAME:
  3219. case IFLA_EXT_MASK:
  3220. case IFLA_TARGET_NETNSID:
  3221. break;
  3222. default:
  3223. NL_SET_ERR_MSG(extack, "Unsupported attribute in get link request");
  3224. return -EINVAL;
  3225. }
  3226. }
  3227. return 0;
  3228. }
  3229. static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
  3230. struct netlink_ext_ack *extack)
  3231. {
  3232. struct net *net = sock_net(skb->sk);
  3233. struct net *tgt_net = net;
  3234. struct ifinfomsg *ifm;
  3235. struct nlattr *tb[IFLA_MAX+1];
  3236. struct net_device *dev = NULL;
  3237. struct sk_buff *nskb;
  3238. int netnsid = -1;
  3239. int err;
  3240. u32 ext_filter_mask = 0;
  3241. err = rtnl_valid_getlink_req(skb, nlh, tb, extack);
  3242. if (err < 0)
  3243. return err;
  3244. err = rtnl_ensure_unique_netns(tb, extack, true);
  3245. if (err < 0)
  3246. return err;
  3247. if (tb[IFLA_TARGET_NETNSID]) {
  3248. netnsid = nla_get_s32(tb[IFLA_TARGET_NETNSID]);
  3249. tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
  3250. if (IS_ERR(tgt_net))
  3251. return PTR_ERR(tgt_net);
  3252. }
  3253. if (tb[IFLA_EXT_MASK])
  3254. ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
  3255. err = -EINVAL;
  3256. ifm = nlmsg_data(nlh);
  3257. if (ifm->ifi_index > 0)
  3258. dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
  3259. else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
  3260. dev = rtnl_dev_get(tgt_net, tb);
  3261. else
  3262. goto out;
  3263. err = -ENODEV;
  3264. if (dev == NULL)
  3265. goto out;
  3266. err = -ENOBUFS;
  3267. nskb = nlmsg_new_large(if_nlmsg_size(dev, ext_filter_mask));
  3268. if (nskb == NULL)
  3269. goto out;
  3270. /* Synchronize the carrier state so we don't report a state
  3271. * that we're not actually going to honour immediately; if
  3272. * the driver just did a carrier off->on transition, we can
  3273. * only TX if link watch work has run, but without this we'd
  3274. * already report carrier on, even if it doesn't work yet.
  3275. */
  3276. linkwatch_sync_dev(dev);
  3277. err = rtnl_fill_ifinfo(nskb, dev, net,
  3278. RTM_NEWLINK, NETLINK_CB(skb).portid,
  3279. nlh->nlmsg_seq, 0, 0, ext_filter_mask,
  3280. 0, NULL, 0, netnsid, GFP_KERNEL);
  3281. if (err < 0) {
  3282. /* -EMSGSIZE implies BUG in if_nlmsg_size */
  3283. WARN_ON(err == -EMSGSIZE);
  3284. kfree_skb(nskb);
  3285. } else
  3286. err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
  3287. out:
  3288. if (netnsid >= 0)
  3289. put_net(tgt_net);
  3290. return err;
  3291. }
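/* Add or remove one alternative interface name (RTM_NEWLINKPROP /
 * RTM_DELLINKPROP); *changed is set when the property list was altered.
 */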
static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
			   bool *changed, struct netlink_ext_ack *extack)
{
	char *alt_ifname;
	size_t size;
	int err;

	err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	if (cmd == RTM_NEWLINKPROP) {
		size = rtnl_prop_list_size(dev);
		size += nla_total_size(ALTIFNAMSIZ);
		if (size >= U16_MAX) {
			NL_SET_ERR_MSG(extack,
				       "effective property list too long");
			return -EINVAL;
		}
	}

	alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT);
	if (!alt_ifname)
		return -ENOMEM;

	if (cmd == RTM_NEWLINKPROP) {
		err = netdev_name_node_alt_create(dev, alt_ifname);
		if (!err)
			alt_ifname = NULL;
	} else if (cmd == RTM_DELLINKPROP) {
		err = netdev_name_node_alt_destroy(dev, alt_ifname);
	} else {
		WARN_ON_ONCE(1);
		err = -EINVAL;
	}

	kfree(alt_ifname);
	if (!err)
		*changed = true;
	return err;
}
static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ifinfomsg *ifm;
	bool changed = false;
	struct nlattr *attr;
	int err, rem;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err)
		return err;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	if (!tb[IFLA_PROP_LIST])
		return 0;

	nla_for_each_nested(attr, tb[IFLA_PROP_LIST], rem) {
		switch (nla_type(attr)) {
		case IFLA_ALT_IFNAME:
			err = rtnl_alt_ifname(cmd, dev, attr, &changed, extack);
			if (err)
				return err;
			break;
		}
	}

	if (changed)
		netdev_state_change(dev);

	return 0;
}
static int rtnl_newlinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_NEWLINKPROP, skb, nlh, extack);
}

static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack);
}
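/* Estimate the per-message buffer size needed for a link dump, taking
 * IFLA_EXT_MASK into account; falls back to NLMSG_GOODSIZE when no
 * extended filter mask is supplied.
 */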
static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb,
					  struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	size_t min_ifinfo_dump_size = 0;
	u32 ext_filter_mask = 0;
	struct net_device *dev;
	struct nlattr *nla;
	int hdrlen, rem;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
		return NLMSG_GOODSIZE;

	nla_for_each_attr_type(nla, IFLA_EXT_MASK,
			       nlmsg_attrdata(nlh, hdrlen),
			       nlmsg_attrlen(nlh, hdrlen), rem) {
		if (nla_len(nla) == sizeof(u32))
			ext_filter_mask = nla_get_u32(nla);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		min_ifinfo_dump_size = max(min_ifinfo_dump_size,
					   if_nlmsg_size(dev, ext_filter_mask));
	}
	rcu_read_unlock();

	return nlmsg_total_size(min_ifinfo_dump_size);
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;
	int type = cb->nlh->nlmsg_type - RTM_BASE;
	int ret = 0;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		struct rtnl_link __rcu **tab;
		struct rtnl_link *link;
		rtnl_dumpit_func dumpit;

		if (idx < s_idx || idx == PF_PACKET)
			continue;

		if (type < 0 || type >= RTM_NR_MSGTYPES)
			continue;

		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
		if (!tab)
			continue;

		link = rcu_dereference_rtnl(tab[type]);
		if (!link)
			continue;

		dumpit = link->dumpit;
		if (!dumpit)
			continue;

		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		ret = dumpit(skb, cb);
		if (ret)
			break;
	}
	cb->family = idx;

	return skb->len ? : ret;
}
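/* Helpers for RTM_NEWLINK notifications: build an ifinfo skb for @dev and
 * multicast it to RTNLGRP_LINK listeners.
 */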
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change,
				       u32 event, gfp_t flags, int *new_nsid,
				       int new_ifindex, u32 portid,
				       const struct nlmsghdr *nlh)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 seq = 0;

	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
	if (skb == NULL)
		goto errout;

	if (nlmsg_report(nlh))
		seq = nlmsg_seq(nlh);
	else
		portid = 0;

	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
			       type, portid, seq, change, 0, 0, event,
			       new_nsid, new_ifindex, -1, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}

void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
		       u32 portid, const struct nlmsghdr *nlh)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
}

static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
			       gfp_t flags, int *new_nsid, int new_ifindex,
			       u32 portid, const struct nlmsghdr *nlh)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
				     new_ifindex, portid, nlh);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   NULL, 0, portid, nlh);
}

void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid, int new_ifindex)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   new_nsid, new_ifindex, 0, NULL);
}
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2	 = 0;
	ndm->ndm_flags	 = flags;
	ndm->ndm_type	 = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state   = ndm_state;

	if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(dev->addr_len) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
}

static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/*
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "default FDB implementation only supports local addresses\n");
		return err;
	}

	if (tb[NDA_FLAGS_EXT]) {
		netdev_info(dev, "invalid flags given to default FDB implementation\n");
		return err;
	}

	if (vid) {
		netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);
static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
			 struct netlink_ext_ack *extack)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);
		if (!vid || vid >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG(extack, "invalid vlan id");
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}
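/* RTM_NEWNEIGH (AF_BRIDGE) handler: an entry flagged NTF_MASTER is passed
 * to the bridge master's ndo_fdb_add, an NTF_SELF entry to the device
 * itself (or to the default implementation above).
 */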
  3621. static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
  3622. struct netlink_ext_ack *extack)
  3623. {
  3624. struct net *net = sock_net(skb->sk);
  3625. struct ndmsg *ndm;
  3626. struct nlattr *tb[NDA_MAX+1];
  3627. struct net_device *dev;
  3628. u8 *addr;
  3629. u16 vid;
  3630. int err;
  3631. err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, NULL,
  3632. extack);
  3633. if (err < 0)
  3634. return err;
  3635. ndm = nlmsg_data(nlh);
  3636. if (ndm->ndm_ifindex == 0) {
  3637. NL_SET_ERR_MSG(extack, "invalid ifindex");
  3638. return -EINVAL;
  3639. }
  3640. dev = __dev_get_by_index(net, ndm->ndm_ifindex);
  3641. if (dev == NULL) {
  3642. NL_SET_ERR_MSG(extack, "unknown ifindex");
  3643. return -ENODEV;
  3644. }
  3645. if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
  3646. NL_SET_ERR_MSG(extack, "invalid address");
  3647. return -EINVAL;
  3648. }
  3649. if (dev->type != ARPHRD_ETHER) {
  3650. NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
  3651. return -EINVAL;
  3652. }
  3653. addr = nla_data(tb[NDA_LLADDR]);
  3654. err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
  3655. if (err)
  3656. return err;
  3657. err = -EOPNOTSUPP;
  3658. /* Support fdb on master device the net/bridge default case */
  3659. if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
  3660. netif_is_bridge_port(dev)) {
  3661. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  3662. const struct net_device_ops *ops = br_dev->netdev_ops;
  3663. err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
  3664. nlh->nlmsg_flags, extack);
  3665. if (err)
  3666. goto out;
  3667. else
  3668. ndm->ndm_flags &= ~NTF_MASTER;
  3669. }
  3670. /* Embedded bridge, macvlan, and any other device support */
  3671. if ((ndm->ndm_flags & NTF_SELF)) {
  3672. if (dev->netdev_ops->ndo_fdb_add)
  3673. err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
  3674. vid,
  3675. nlh->nlmsg_flags,
  3676. extack);
  3677. else
  3678. err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
  3679. nlh->nlmsg_flags);
  3680. if (!err) {
  3681. rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
  3682. ndm->ndm_state);
  3683. ndm->ndm_flags &= ~NTF_SELF;
  3684. }
  3685. }
  3686. out:
  3687. return err;
  3688. }
/*
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "default FDB implementation only supports local addresses\n");
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);
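/* RTM_DELNEIGH (AF_BRIDGE) handler: mirrors rtnl_fdb_add(), with optional
 * NLM_F_BULK deletes routed to the ndo_fdb_del_bulk callbacks.
 */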
  3712. static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
  3713. struct netlink_ext_ack *extack)
  3714. {
  3715. bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
  3716. struct net *net = sock_net(skb->sk);
  3717. const struct net_device_ops *ops;
  3718. struct ndmsg *ndm;
  3719. struct nlattr *tb[NDA_MAX+1];
  3720. struct net_device *dev;
  3721. __u8 *addr = NULL;
  3722. int err;
  3723. u16 vid;
  3724. if (!del_bulk) {
  3725. err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
  3726. NULL, extack);
  3727. } else {
  3728. /* For bulk delete, the drivers will parse the message with
  3729. * policy.
  3730. */
  3731. err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
  3732. }
  3733. if (err < 0)
  3734. return err;
  3735. ndm = nlmsg_data(nlh);
  3736. if (ndm->ndm_ifindex == 0) {
  3737. NL_SET_ERR_MSG(extack, "invalid ifindex");
  3738. return -EINVAL;
  3739. }
  3740. dev = __dev_get_by_index(net, ndm->ndm_ifindex);
  3741. if (dev == NULL) {
  3742. NL_SET_ERR_MSG(extack, "unknown ifindex");
  3743. return -ENODEV;
  3744. }
  3745. if (!del_bulk) {
  3746. if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
  3747. NL_SET_ERR_MSG(extack, "invalid address");
  3748. return -EINVAL;
  3749. }
  3750. addr = nla_data(tb[NDA_LLADDR]);
  3751. err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
  3752. if (err)
  3753. return err;
  3754. }
  3755. if (dev->type != ARPHRD_ETHER) {
  3756. NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
  3757. return -EINVAL;
  3758. }
  3759. err = -EOPNOTSUPP;
  3760. /* Support fdb on master device the net/bridge default case */
  3761. if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
  3762. netif_is_bridge_port(dev)) {
  3763. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  3764. ops = br_dev->netdev_ops;
  3765. if (!del_bulk) {
  3766. if (ops->ndo_fdb_del)
  3767. err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
  3768. } else {
  3769. if (ops->ndo_fdb_del_bulk)
  3770. err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
  3771. }
  3772. if (err)
  3773. goto out;
  3774. else
  3775. ndm->ndm_flags &= ~NTF_MASTER;
  3776. }
  3777. /* Embedded bridge, macvlan, and any other device support */
  3778. if (ndm->ndm_flags & NTF_SELF) {
  3779. ops = dev->netdev_ops;
  3780. if (!del_bulk) {
  3781. if (ops->ndo_fdb_del)
  3782. err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid, extack);
  3783. else
  3784. err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
  3785. } else {
  3786. /* in case err was cleared by NTF_MASTER call */
  3787. err = -EOPNOTSUPP;
  3788. if (ops->ndo_fdb_del_bulk)
  3789. err = ops->ndo_fdb_del_bulk(nlh, dev, extack);
  3790. }
  3791. if (!err) {
  3792. if (!del_bulk)
  3793. rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
  3794. ndm->ndm_state);
  3795. ndm->ndm_flags &= ~NTF_SELF;
  3796. }
  3797. }
  3798. out:
  3799. return err;
  3800. }
static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}
/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer to store message in
 * @cb: netlink callback
 * @dev: netdevice
 * @filter_dev: ignored
 * @idx: the number of FDB table entries dumped is added to *@idx
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns number of addresses from list put in skb.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	if (dev->type != ARPHRD_ETHER)
		return -EINVAL;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
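/* Strict validation of an FDB dump request: only NDA_IFINDEX and
 * NDA_MASTER are accepted, and both must carry a u32.
 */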
  3856. static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
  3857. int *br_idx, int *brport_idx,
  3858. struct netlink_ext_ack *extack)
  3859. {
  3860. struct nlattr *tb[NDA_MAX + 1];
  3861. struct ndmsg *ndm;
  3862. int err, i;
  3863. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
  3864. NL_SET_ERR_MSG(extack, "Invalid header for fdb dump request");
  3865. return -EINVAL;
  3866. }
  3867. ndm = nlmsg_data(nlh);
  3868. if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
  3869. ndm->ndm_flags || ndm->ndm_type) {
  3870. NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
  3871. return -EINVAL;
  3872. }
  3873. err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
  3874. NDA_MAX, NULL, extack);
  3875. if (err < 0)
  3876. return err;
  3877. *brport_idx = ndm->ndm_ifindex;
  3878. for (i = 0; i <= NDA_MAX; ++i) {
  3879. if (!tb[i])
  3880. continue;
  3881. switch (i) {
  3882. case NDA_IFINDEX:
  3883. if (nla_len(tb[i]) != sizeof(u32)) {
  3884. NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in fdb dump request");
  3885. return -EINVAL;
  3886. }
  3887. *brport_idx = nla_get_u32(tb[NDA_IFINDEX]);
  3888. break;
  3889. case NDA_MASTER:
  3890. if (nla_len(tb[i]) != sizeof(u32)) {
  3891. NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in fdb dump request");
  3892. return -EINVAL;
  3893. }
  3894. *br_idx = nla_get_u32(tb[NDA_MASTER]);
  3895. break;
  3896. default:
  3897. NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb dump request");
  3898. return -EINVAL;
  3899. }
  3900. }
  3901. return 0;
  3902. }
static int valid_fdb_dump_legacy(const struct nlmsghdr *nlh,
				 int *br_idx, int *brport_idx,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MAX+1];
	int err;

	/* A hack to preserve kernel<->userspace interface.
	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
	 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
	 * So, check for ndmsg with an optional u32 attribute (not used here).
	 * Fortunately these sizes don't conflict with the size of ifinfomsg
	 * with an optional attribute.
	 */
	if (nlmsg_len(nlh) != sizeof(struct ndmsg) &&
	    (nlmsg_len(nlh) != sizeof(struct ndmsg) +
	     nla_attr_size(sizeof(u32)))) {
		struct ifinfomsg *ifm;

		err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
					     tb, IFLA_MAX, ifla_policy,
					     extack);
		if (err < 0) {
			return -EINVAL;
		} else if (err == 0) {
			if (tb[IFLA_MASTER])
				*br_idx = nla_get_u32(tb[IFLA_MASTER]);
		}

		ifm = nlmsg_data(nlh);
		*brport_idx = ifm->ifi_index;
	}
	return 0;
}
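/* Dump FDB entries, walking every device (optionally filtered by bridge
 * master and/or bridge port) and resuming from cb->args[] across calls.
 */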
  3934. static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
  3935. {
  3936. struct net_device *dev;
  3937. struct net_device *br_dev = NULL;
  3938. const struct net_device_ops *ops = NULL;
  3939. const struct net_device_ops *cops = NULL;
  3940. struct net *net = sock_net(skb->sk);
  3941. struct hlist_head *head;
  3942. int brport_idx = 0;
  3943. int br_idx = 0;
  3944. int h, s_h;
  3945. int idx = 0, s_idx;
  3946. int err = 0;
  3947. int fidx = 0;
  3948. if (cb->strict_check)
  3949. err = valid_fdb_dump_strict(cb->nlh, &br_idx, &brport_idx,
  3950. cb->extack);
  3951. else
  3952. err = valid_fdb_dump_legacy(cb->nlh, &br_idx, &brport_idx,
  3953. cb->extack);
  3954. if (err < 0)
  3955. return err;
  3956. if (br_idx) {
  3957. br_dev = __dev_get_by_index(net, br_idx);
  3958. if (!br_dev)
  3959. return -ENODEV;
  3960. ops = br_dev->netdev_ops;
  3961. }
  3962. s_h = cb->args[0];
  3963. s_idx = cb->args[1];
  3964. for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
  3965. idx = 0;
  3966. head = &net->dev_index_head[h];
  3967. hlist_for_each_entry(dev, head, index_hlist) {
  3968. if (brport_idx && (dev->ifindex != brport_idx))
  3969. continue;
  3970. if (!br_idx) { /* user did not specify a specific bridge */
  3971. if (netif_is_bridge_port(dev)) {
  3972. br_dev = netdev_master_upper_dev_get(dev);
  3973. cops = br_dev->netdev_ops;
  3974. }
  3975. } else {
  3976. if (dev != br_dev &&
  3977. !netif_is_bridge_port(dev))
  3978. continue;
  3979. if (br_dev != netdev_master_upper_dev_get(dev) &&
  3980. !netif_is_bridge_master(dev))
  3981. continue;
  3982. cops = ops;
  3983. }
  3984. if (idx < s_idx)
  3985. goto cont;
  3986. if (netif_is_bridge_port(dev)) {
  3987. if (cops && cops->ndo_fdb_dump) {
  3988. err = cops->ndo_fdb_dump(skb, cb,
  3989. br_dev, dev,
  3990. &fidx);
  3991. if (err == -EMSGSIZE)
  3992. goto out;
  3993. }
  3994. }
  3995. if (dev->netdev_ops->ndo_fdb_dump)
  3996. err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
  3997. dev, NULL,
  3998. &fidx);
  3999. else
  4000. err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
  4001. &fidx);
  4002. if (err == -EMSGSIZE)
  4003. goto out;
  4004. cops = NULL;
  4005. /* reset fdb offset to 0 for rest of the interfaces */
  4006. cb->args[2] = 0;
  4007. fidx = 0;
  4008. cont:
  4009. idx++;
  4010. }
  4011. }
  4012. out:
  4013. cb->args[0] = h;
  4014. cb->args[1] = idx;
  4015. cb->args[2] = fidx;
  4016. return skb->len;
  4017. }
  4018. static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
  4019. struct nlattr **tb, u8 *ndm_flags,
  4020. int *br_idx, int *brport_idx, u8 **addr,
  4021. u16 *vid, struct netlink_ext_ack *extack)
  4022. {
  4023. struct ndmsg *ndm;
  4024. int err, i;
  4025. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
  4026. NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
  4027. return -EINVAL;
  4028. }
  4029. ndm = nlmsg_data(nlh);
  4030. if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
  4031. ndm->ndm_type) {
  4032. NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
  4033. return -EINVAL;
  4034. }
  4035. if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
  4036. NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
  4037. return -EINVAL;
  4038. }
  4039. err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
  4040. NDA_MAX, nda_policy, extack);
  4041. if (err < 0)
  4042. return err;
  4043. *ndm_flags = ndm->ndm_flags;
  4044. *brport_idx = ndm->ndm_ifindex;
  4045. for (i = 0; i <= NDA_MAX; ++i) {
  4046. if (!tb[i])
  4047. continue;
  4048. switch (i) {
  4049. case NDA_MASTER:
  4050. *br_idx = nla_get_u32(tb[i]);
  4051. break;
  4052. case NDA_LLADDR:
  4053. if (nla_len(tb[i]) != ETH_ALEN) {
  4054. NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
  4055. return -EINVAL;
  4056. }
  4057. *addr = nla_data(tb[i]);
  4058. break;
  4059. case NDA_VLAN:
  4060. err = fdb_vid_parse(tb[i], vid, extack);
  4061. if (err)
  4062. return err;
  4063. break;
  4064. case NDA_VNI:
  4065. break;
  4066. default:
  4067. NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
  4068. return -EINVAL;
  4069. }
  4070. }
  4071. return 0;
  4072. }
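/* RTM_GETNEIGH (AF_BRIDGE) handler: look up a single FDB entry by address
 * and optional VLAN via the selected device's ndo_fdb_get and unicast the
 * result back to the requester.
 */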
  4073. static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
  4074. struct netlink_ext_ack *extack)
  4075. {
  4076. struct net_device *dev = NULL, *br_dev = NULL;
  4077. const struct net_device_ops *ops = NULL;
  4078. struct net *net = sock_net(in_skb->sk);
  4079. struct nlattr *tb[NDA_MAX + 1];
  4080. struct sk_buff *skb;
  4081. int brport_idx = 0;
  4082. u8 ndm_flags = 0;
  4083. int br_idx = 0;
  4084. u8 *addr = NULL;
  4085. u16 vid = 0;
  4086. int err;
  4087. err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
  4088. &brport_idx, &addr, &vid, extack);
  4089. if (err < 0)
  4090. return err;
  4091. if (!addr) {
  4092. NL_SET_ERR_MSG(extack, "Missing lookup address for fdb get request");
  4093. return -EINVAL;
  4094. }
  4095. if (brport_idx) {
  4096. dev = __dev_get_by_index(net, brport_idx);
  4097. if (!dev) {
  4098. NL_SET_ERR_MSG(extack, "Unknown device ifindex");
  4099. return -ENODEV;
  4100. }
  4101. }
  4102. if (br_idx) {
  4103. if (dev) {
  4104. NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
  4105. return -EINVAL;
  4106. }
  4107. br_dev = __dev_get_by_index(net, br_idx);
  4108. if (!br_dev) {
  4109. NL_SET_ERR_MSG(extack, "Invalid master ifindex");
  4110. return -EINVAL;
  4111. }
  4112. ops = br_dev->netdev_ops;
  4113. }
  4114. if (dev) {
  4115. if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
  4116. if (!netif_is_bridge_port(dev)) {
  4117. NL_SET_ERR_MSG(extack, "Device is not a bridge port");
  4118. return -EINVAL;
  4119. }
  4120. br_dev = netdev_master_upper_dev_get(dev);
  4121. if (!br_dev) {
  4122. NL_SET_ERR_MSG(extack, "Master of device not found");
  4123. return -EINVAL;
  4124. }
  4125. ops = br_dev->netdev_ops;
  4126. } else {
  4127. if (!(ndm_flags & NTF_SELF)) {
  4128. NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
  4129. return -EINVAL;
  4130. }
  4131. ops = dev->netdev_ops;
  4132. }
  4133. }
  4134. if (!br_dev && !dev) {
  4135. NL_SET_ERR_MSG(extack, "No device specified");
  4136. return -ENODEV;
  4137. }
  4138. if (!ops || !ops->ndo_fdb_get) {
  4139. NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
  4140. return -EOPNOTSUPP;
  4141. }
  4142. skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
  4143. if (!skb)
  4144. return -ENOBUFS;
  4145. if (br_dev)
  4146. dev = br_dev;
  4147. err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
  4148. NETLINK_CB(in_skb).portid,
  4149. nlh->nlmsg_seq, extack);
  4150. if (err)
  4151. goto out;
  4152. return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
  4153. out:
  4154. kfree_skb(skb);
  4155. return err;
  4156. }
  4157. static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
  4158. unsigned int attrnum, unsigned int flag)
  4159. {
  4160. if (mask & flag)
  4161. return nla_put_u8(skb, attrnum, !!(flags & flag));
  4162. return 0;
  4163. }
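/*
 * Default RTM_NEWLINK (AF_BRIDGE) fill routine for drivers that do not build
 * the message themselves: emits the ifinfomsg plus an IFLA_AF_SPEC nest
 * (BRIDGE_FLAGS_SELF, an optional bridge mode and optional VLAN info via the
 * vlan_fill callback) and an IFLA_PROTINFO nest with the requested bridge
 * port flags.
 */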
  4164. int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
  4165. struct net_device *dev, u16 mode,
  4166. u32 flags, u32 mask, int nlflags,
  4167. u32 filter_mask,
  4168. int (*vlan_fill)(struct sk_buff *skb,
  4169. struct net_device *dev,
  4170. u32 filter_mask))
  4171. {
  4172. struct nlmsghdr *nlh;
  4173. struct ifinfomsg *ifm;
  4174. struct nlattr *br_afspec;
  4175. struct nlattr *protinfo;
  4176. u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
  4177. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  4178. int err = 0;
  4179. nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
  4180. if (nlh == NULL)
  4181. return -EMSGSIZE;
  4182. ifm = nlmsg_data(nlh);
  4183. ifm->ifi_family = AF_BRIDGE;
  4184. ifm->__ifi_pad = 0;
  4185. ifm->ifi_type = dev->type;
  4186. ifm->ifi_index = dev->ifindex;
  4187. ifm->ifi_flags = dev_get_flags(dev);
  4188. ifm->ifi_change = 0;
  4189. if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
  4190. nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
  4191. nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
  4192. (br_dev &&
  4193. nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
  4194. (dev->addr_len &&
  4195. nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
  4196. (dev->ifindex != dev_get_iflink(dev) &&
  4197. nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
  4198. goto nla_put_failure;
  4199. br_afspec = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
  4200. if (!br_afspec)
  4201. goto nla_put_failure;
  4202. if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
  4203. nla_nest_cancel(skb, br_afspec);
  4204. goto nla_put_failure;
  4205. }
  4206. if (mode != BRIDGE_MODE_UNDEF) {
  4207. if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
  4208. nla_nest_cancel(skb, br_afspec);
  4209. goto nla_put_failure;
  4210. }
  4211. }
  4212. if (vlan_fill) {
  4213. err = vlan_fill(skb, dev, filter_mask);
  4214. if (err) {
  4215. nla_nest_cancel(skb, br_afspec);
  4216. goto nla_put_failure;
  4217. }
  4218. }
  4219. nla_nest_end(skb, br_afspec);
  4220. protinfo = nla_nest_start(skb, IFLA_PROTINFO);
  4221. if (!protinfo)
  4222. goto nla_put_failure;
  4223. if (brport_nla_put_flag(skb, flags, mask,
  4224. IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
  4225. brport_nla_put_flag(skb, flags, mask,
  4226. IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
  4227. brport_nla_put_flag(skb, flags, mask,
  4228. IFLA_BRPORT_FAST_LEAVE,
  4229. BR_MULTICAST_FAST_LEAVE) ||
  4230. brport_nla_put_flag(skb, flags, mask,
  4231. IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
  4232. brport_nla_put_flag(skb, flags, mask,
  4233. IFLA_BRPORT_LEARNING, BR_LEARNING) ||
  4234. brport_nla_put_flag(skb, flags, mask,
  4235. IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
  4236. brport_nla_put_flag(skb, flags, mask,
  4237. IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
  4238. brport_nla_put_flag(skb, flags, mask,
  4239. IFLA_BRPORT_PROXYARP, BR_PROXYARP) ||
  4240. brport_nla_put_flag(skb, flags, mask,
  4241. IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD) ||
  4242. brport_nla_put_flag(skb, flags, mask,
  4243. IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD)) {
  4244. nla_nest_cancel(skb, protinfo);
  4245. goto nla_put_failure;
  4246. }
  4247. nla_nest_end(skb, protinfo);
  4248. nlmsg_end(skb, nlh);
  4249. return 0;
  4250. nla_put_failure:
  4251. nlmsg_cancel(skb, nlh);
  4252. return err ? err : -EMSGSIZE;
  4253. }
  4254. EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
  4255. static int valid_bridge_getlink_req(const struct nlmsghdr *nlh,
  4256. bool strict_check, u32 *filter_mask,
  4257. struct netlink_ext_ack *extack)
  4258. {
  4259. struct nlattr *tb[IFLA_MAX+1];
  4260. int err, i;
  4261. if (strict_check) {
  4262. struct ifinfomsg *ifm;
  4263. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
  4264. NL_SET_ERR_MSG(extack, "Invalid header for bridge link dump");
  4265. return -EINVAL;
  4266. }
  4267. ifm = nlmsg_data(nlh);
  4268. if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
  4269. ifm->ifi_change || ifm->ifi_index) {
  4270. NL_SET_ERR_MSG(extack, "Invalid values in header for bridge link dump request");
  4271. return -EINVAL;
  4272. }
  4273. err = nlmsg_parse_deprecated_strict(nlh,
  4274. sizeof(struct ifinfomsg),
  4275. tb, IFLA_MAX, ifla_policy,
  4276. extack);
  4277. } else {
  4278. err = nlmsg_parse_deprecated(nlh, sizeof(struct ifinfomsg),
  4279. tb, IFLA_MAX, ifla_policy,
  4280. extack);
  4281. }
  4282. if (err < 0)
  4283. return err;
  4284. /* new attributes should only be added with strict checking */
  4285. for (i = 0; i <= IFLA_MAX; ++i) {
  4286. if (!tb[i])
  4287. continue;
  4288. switch (i) {
  4289. case IFLA_EXT_MASK:
  4290. *filter_mask = nla_get_u32(tb[i]);
  4291. break;
  4292. default:
  4293. if (strict_check) {
  4294. NL_SET_ERR_MSG(extack, "Unsupported attribute in bridge link dump request");
  4295. return -EINVAL;
  4296. }
  4297. }
  4298. }
  4299. return 0;
  4300. }
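/*
 * RTM_GETLINK (AF_BRIDGE) dump: walk every netdevice under RCU and let both
 * the master bridge device and the port itself contribute an entry via
 * ndo_bridge_getlink(), resuming from the index saved in cb->args[0].
 */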
  4301. static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
  4302. {
  4303. const struct nlmsghdr *nlh = cb->nlh;
  4304. struct net *net = sock_net(skb->sk);
  4305. struct net_device *dev;
  4306. int idx = 0;
  4307. u32 portid = NETLINK_CB(cb->skb).portid;
  4308. u32 seq = nlh->nlmsg_seq;
  4309. u32 filter_mask = 0;
  4310. int err;
  4311. err = valid_bridge_getlink_req(nlh, cb->strict_check, &filter_mask,
  4312. cb->extack);
  4313. if (err < 0 && cb->strict_check)
  4314. return err;
  4315. rcu_read_lock();
  4316. for_each_netdev_rcu(net, dev) {
  4317. const struct net_device_ops *ops = dev->netdev_ops;
  4318. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  4319. if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
  4320. if (idx >= cb->args[0]) {
  4321. err = br_dev->netdev_ops->ndo_bridge_getlink(
  4322. skb, portid, seq, dev,
  4323. filter_mask, NLM_F_MULTI);
  4324. if (err < 0 && err != -EOPNOTSUPP) {
  4325. if (likely(skb->len))
  4326. break;
  4327. goto out_err;
  4328. }
  4329. }
  4330. idx++;
  4331. }
  4332. if (ops->ndo_bridge_getlink) {
  4333. if (idx >= cb->args[0]) {
  4334. err = ops->ndo_bridge_getlink(skb, portid,
  4335. seq, dev,
  4336. filter_mask,
  4337. NLM_F_MULTI);
  4338. if (err < 0 && err != -EOPNOTSUPP) {
  4339. if (likely(skb->len))
  4340. break;
  4341. goto out_err;
  4342. }
  4343. }
  4344. idx++;
  4345. }
  4346. }
  4347. err = skb->len;
  4348. out_err:
  4349. rcu_read_unlock();
  4350. cb->args[0] = idx;
  4351. return err;
  4352. }
  4353. static inline size_t bridge_nlmsg_size(void)
  4354. {
  4355. return NLMSG_ALIGN(sizeof(struct ifinfomsg))
  4356. + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
  4357. + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
  4358. + nla_total_size(sizeof(u32)) /* IFLA_MASTER */
  4359. + nla_total_size(sizeof(u32)) /* IFLA_MTU */
  4360. + nla_total_size(sizeof(u32)) /* IFLA_LINK */
  4361. + nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
  4362. + nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
  4363. + nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
  4364. + nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
  4365. + nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
  4366. }
  4367. static int rtnl_bridge_notify(struct net_device *dev)
  4368. {
  4369. struct net *net = dev_net(dev);
  4370. struct sk_buff *skb;
  4371. int err = -EOPNOTSUPP;
  4372. if (!dev->netdev_ops->ndo_bridge_getlink)
  4373. return 0;
  4374. skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
  4375. if (!skb) {
  4376. err = -ENOMEM;
  4377. goto errout;
  4378. }
  4379. err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
  4380. if (err < 0)
  4381. goto errout;
  4382. /* Notification info is only filled for bridge ports, not the bridge
  4383. * device itself. Therefore, a zero notification length is valid and
  4384. * should not result in an error.
  4385. */
  4386. if (!skb->len)
  4387. goto errout;
  4388. rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
  4389. return 0;
  4390. errout:
  4391. WARN_ON(err == -EMSGSIZE);
  4392. kfree_skb(skb);
  4393. if (err)
  4394. rtnl_set_sk_err(net, RTNLGRP_LINK, err);
  4395. return err;
  4396. }
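/*
 * RTM_SETLINK (AF_BRIDGE): apply the IFLA_AF_SPEC request first to the
 * master bridge (unless only BRIDGE_FLAGS_SELF was asked for), then to the
 * port itself for BRIDGE_FLAGS_SELF, and write the flags that remain
 * unhandled back into the IFLA_BRIDGE_FLAGS attribute of the request.
 */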
  4397. static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
  4398. struct netlink_ext_ack *extack)
  4399. {
  4400. struct net *net = sock_net(skb->sk);
  4401. struct ifinfomsg *ifm;
  4402. struct net_device *dev;
  4403. struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
  4404. int rem, err = -EOPNOTSUPP;
  4405. u16 flags = 0;
  4406. if (nlmsg_len(nlh) < sizeof(*ifm))
  4407. return -EINVAL;
  4408. ifm = nlmsg_data(nlh);
  4409. if (ifm->ifi_family != AF_BRIDGE)
  4410. return -EPFNOSUPPORT;
  4411. dev = __dev_get_by_index(net, ifm->ifi_index);
  4412. if (!dev) {
  4413. NL_SET_ERR_MSG(extack, "unknown ifindex");
  4414. return -ENODEV;
  4415. }
  4416. br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
  4417. if (br_spec) {
  4418. nla_for_each_nested(attr, br_spec, rem) {
  4419. if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
  4420. if (nla_len(attr) < sizeof(flags))
  4421. return -EINVAL;
  4422. br_flags_attr = attr;
  4423. flags = nla_get_u16(attr);
  4424. }
  4425. if (nla_type(attr) == IFLA_BRIDGE_MODE) {
  4426. if (nla_len(attr) < sizeof(u16))
  4427. return -EINVAL;
  4428. }
  4429. }
  4430. }
  4431. if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
  4432. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  4433. if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
  4434. err = -EOPNOTSUPP;
  4435. goto out;
  4436. }
  4437. err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
  4438. extack);
  4439. if (err)
  4440. goto out;
  4441. flags &= ~BRIDGE_FLAGS_MASTER;
  4442. }
  4443. if ((flags & BRIDGE_FLAGS_SELF)) {
  4444. if (!dev->netdev_ops->ndo_bridge_setlink)
  4445. err = -EOPNOTSUPP;
  4446. else
  4447. err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
  4448. flags,
  4449. extack);
  4450. if (!err) {
  4451. flags &= ~BRIDGE_FLAGS_SELF;
  4452. /* Generate event to notify upper layer of bridge
  4453. * change
  4454. */
  4455. err = rtnl_bridge_notify(dev);
  4456. }
  4457. }
  4458. if (br_flags_attr)
  4459. memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
  4460. out:
  4461. return err;
  4462. }
  4463. static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
  4464. struct netlink_ext_ack *extack)
  4465. {
  4466. struct net *net = sock_net(skb->sk);
  4467. struct ifinfomsg *ifm;
  4468. struct net_device *dev;
  4469. struct nlattr *br_spec, *attr = NULL;
  4470. int rem, err = -EOPNOTSUPP;
  4471. u16 flags = 0;
  4472. bool have_flags = false;
  4473. if (nlmsg_len(nlh) < sizeof(*ifm))
  4474. return -EINVAL;
  4475. ifm = nlmsg_data(nlh);
  4476. if (ifm->ifi_family != AF_BRIDGE)
  4477. return -EPFNOSUPPORT;
  4478. dev = __dev_get_by_index(net, ifm->ifi_index);
  4479. if (!dev) {
  4480. NL_SET_ERR_MSG(extack, "unknown ifindex");
  4481. return -ENODEV;
  4482. }
  4483. br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
  4484. if (br_spec) {
  4485. nla_for_each_nested_type(attr, IFLA_BRIDGE_FLAGS, br_spec,
  4486. rem) {
  4487. if (nla_len(attr) < sizeof(flags))
  4488. return -EINVAL;
  4489. have_flags = true;
  4490. flags = nla_get_u16(attr);
  4491. break;
  4492. }
  4493. }
  4494. if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
  4495. struct net_device *br_dev = netdev_master_upper_dev_get(dev);
  4496. if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
  4497. err = -EOPNOTSUPP;
  4498. goto out;
  4499. }
  4500. err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
  4501. if (err)
  4502. goto out;
  4503. flags &= ~BRIDGE_FLAGS_MASTER;
  4504. }
  4505. if ((flags & BRIDGE_FLAGS_SELF)) {
  4506. if (!dev->netdev_ops->ndo_bridge_dellink)
  4507. err = -EOPNOTSUPP;
  4508. else
  4509. err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
  4510. flags);
  4511. if (!err) {
  4512. flags &= ~BRIDGE_FLAGS_SELF;
  4513. /* Generate event to notify upper layer of bridge
  4514. * change
  4515. */
  4516. err = rtnl_bridge_notify(dev);
  4517. }
  4518. }
  4519. if (have_flags)
  4520. memcpy(nla_data(attr), &flags, sizeof(flags));
  4521. out:
  4522. return err;
  4523. }
  4524. static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
  4525. {
  4526. return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
  4527. (!idxattr || idxattr == attrid);
  4528. }
  4529. static bool
  4530. rtnl_offload_xstats_have_ndo(const struct net_device *dev, int attr_id)
  4531. {
  4532. return dev->netdev_ops &&
  4533. dev->netdev_ops->ndo_has_offload_stats &&
  4534. dev->netdev_ops->ndo_get_offload_stats &&
  4535. dev->netdev_ops->ndo_has_offload_stats(dev, attr_id);
  4536. }
  4537. static unsigned int
  4538. rtnl_offload_xstats_get_size_ndo(const struct net_device *dev, int attr_id)
  4539. {
  4540. return rtnl_offload_xstats_have_ndo(dev, attr_id) ?
  4541. sizeof(struct rtnl_link_stats64) : 0;
  4542. }
  4543. static int
  4544. rtnl_offload_xstats_fill_ndo(struct net_device *dev, int attr_id,
  4545. struct sk_buff *skb)
  4546. {
  4547. unsigned int size = rtnl_offload_xstats_get_size_ndo(dev, attr_id);
  4548. struct nlattr *attr = NULL;
  4549. void *attr_data;
  4550. int err;
  4551. if (!size)
  4552. return -ENODATA;
  4553. attr = nla_reserve_64bit(skb, attr_id, size,
  4554. IFLA_OFFLOAD_XSTATS_UNSPEC);
  4555. if (!attr)
  4556. return -EMSGSIZE;
  4557. attr_data = nla_data(attr);
  4558. memset(attr_data, 0, size);
  4559. err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev, attr_data);
  4560. if (err)
  4561. return err;
  4562. return 0;
  4563. }
  4564. static unsigned int
  4565. rtnl_offload_xstats_get_size_stats(const struct net_device *dev,
  4566. enum netdev_offload_xstats_type type)
  4567. {
  4568. bool enabled = netdev_offload_xstats_enabled(dev, type);
  4569. return enabled ? sizeof(struct rtnl_hw_stats64) : 0;
  4570. }
  4571. struct rtnl_offload_xstats_request_used {
  4572. bool request;
  4573. bool used;
  4574. };
  4575. static int
  4576. rtnl_offload_xstats_get_stats(struct net_device *dev,
  4577. enum netdev_offload_xstats_type type,
  4578. struct rtnl_offload_xstats_request_used *ru,
  4579. struct rtnl_hw_stats64 *stats,
  4580. struct netlink_ext_ack *extack)
  4581. {
  4582. bool request;
  4583. bool used;
  4584. int err;
  4585. request = netdev_offload_xstats_enabled(dev, type);
  4586. if (!request) {
  4587. used = false;
  4588. goto out;
  4589. }
  4590. err = netdev_offload_xstats_get(dev, type, stats, &used, extack);
  4591. if (err)
  4592. return err;
  4593. out:
  4594. if (ru) {
  4595. ru->request = request;
  4596. ru->used = used;
  4597. }
  4598. return 0;
  4599. }
  4600. static int
  4601. rtnl_offload_xstats_fill_hw_s_info_one(struct sk_buff *skb, int attr_id,
  4602. struct rtnl_offload_xstats_request_used *ru)
  4603. {
  4604. struct nlattr *nest;
  4605. nest = nla_nest_start(skb, attr_id);
  4606. if (!nest)
  4607. return -EMSGSIZE;
  4608. if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, ru->request))
  4609. goto nla_put_failure;
  4610. if (nla_put_u8(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, ru->used))
  4611. goto nla_put_failure;
  4612. nla_nest_end(skb, nest);
  4613. return 0;
  4614. nla_put_failure:
  4615. nla_nest_cancel(skb, nest);
  4616. return -EMSGSIZE;
  4617. }
  4618. static int
  4619. rtnl_offload_xstats_fill_hw_s_info(struct sk_buff *skb, struct net_device *dev,
  4620. struct netlink_ext_ack *extack)
  4621. {
  4622. enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
  4623. struct rtnl_offload_xstats_request_used ru_l3;
  4624. struct nlattr *nest;
  4625. int err;
  4626. err = rtnl_offload_xstats_get_stats(dev, t_l3, &ru_l3, NULL, extack);
  4627. if (err)
  4628. return err;
  4629. nest = nla_nest_start(skb, IFLA_OFFLOAD_XSTATS_HW_S_INFO);
  4630. if (!nest)
  4631. return -EMSGSIZE;
  4632. if (rtnl_offload_xstats_fill_hw_s_info_one(skb,
  4633. IFLA_OFFLOAD_XSTATS_L3_STATS,
  4634. &ru_l3))
  4635. goto nla_put_failure;
  4636. nla_nest_end(skb, nest);
  4637. return 0;
  4638. nla_put_failure:
  4639. nla_nest_cancel(skb, nest);
  4640. return -EMSGSIZE;
  4641. }
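/*
 * Fill the IFLA_STATS_LINK_OFFLOAD_XSTATS nest: CPU_HIT counters via the
 * ndo_get_offload_stats callback, the HW_S_INFO request/used nest and the
 * L3 hardware stats, using *prividx to remember which attribute to resume
 * from after an -EMSGSIZE.
 */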
  4642. static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
  4643. int *prividx, u32 off_filter_mask,
  4644. struct netlink_ext_ack *extack)
  4645. {
  4646. enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
  4647. int attr_id_hw_s_info = IFLA_OFFLOAD_XSTATS_HW_S_INFO;
  4648. int attr_id_l3_stats = IFLA_OFFLOAD_XSTATS_L3_STATS;
  4649. int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
  4650. bool have_data = false;
  4651. int err;
  4652. if (*prividx <= attr_id_cpu_hit &&
  4653. (off_filter_mask &
  4654. IFLA_STATS_FILTER_BIT(attr_id_cpu_hit))) {
  4655. err = rtnl_offload_xstats_fill_ndo(dev, attr_id_cpu_hit, skb);
  4656. if (!err) {
  4657. have_data = true;
  4658. } else if (err != -ENODATA) {
  4659. *prividx = attr_id_cpu_hit;
  4660. return err;
  4661. }
  4662. }
  4663. if (*prividx <= attr_id_hw_s_info &&
  4664. (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_hw_s_info))) {
  4665. *prividx = attr_id_hw_s_info;
  4666. err = rtnl_offload_xstats_fill_hw_s_info(skb, dev, extack);
  4667. if (err)
  4668. return err;
  4669. have_data = true;
  4670. *prividx = 0;
  4671. }
  4672. if (*prividx <= attr_id_l3_stats &&
  4673. (off_filter_mask & IFLA_STATS_FILTER_BIT(attr_id_l3_stats))) {
  4674. unsigned int size_l3;
  4675. struct nlattr *attr;
  4676. *prividx = attr_id_l3_stats;
  4677. size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
  4678. if (!size_l3)
  4679. goto skip_l3_stats;
  4680. attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
  4681. IFLA_OFFLOAD_XSTATS_UNSPEC);
  4682. if (!attr)
  4683. return -EMSGSIZE;
  4684. err = rtnl_offload_xstats_get_stats(dev, t_l3, NULL,
  4685. nla_data(attr), extack);
  4686. if (err)
  4687. return err;
  4688. have_data = true;
  4689. skip_l3_stats:
  4690. *prividx = 0;
  4691. }
  4692. if (!have_data)
  4693. return -ENODATA;
  4694. *prividx = 0;
  4695. return 0;
  4696. }
  4697. static unsigned int
  4698. rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
  4699. enum netdev_offload_xstats_type type)
  4700. {
  4701. return nla_total_size(0) +
  4702. /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
  4703. nla_total_size(sizeof(u8)) +
  4704. /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
  4705. nla_total_size(sizeof(u8)) +
  4706. 0;
  4707. }
  4708. static unsigned int
  4709. rtnl_offload_xstats_get_size_hw_s_info(const struct net_device *dev)
  4710. {
  4711. enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
  4712. return nla_total_size(0) +
  4713. /* IFLA_OFFLOAD_XSTATS_L3_STATS */
  4714. rtnl_offload_xstats_get_size_hw_s_info_one(dev, t_l3) +
  4715. 0;
  4716. }
  4717. static int rtnl_offload_xstats_get_size(const struct net_device *dev,
  4718. u32 off_filter_mask)
  4719. {
  4720. enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
  4721. int attr_id_cpu_hit = IFLA_OFFLOAD_XSTATS_CPU_HIT;
  4722. int nla_size = 0;
  4723. int size;
  4724. if (off_filter_mask &
  4725. IFLA_STATS_FILTER_BIT(attr_id_cpu_hit)) {
  4726. size = rtnl_offload_xstats_get_size_ndo(dev, attr_id_cpu_hit);
  4727. nla_size += nla_total_size_64bit(size);
  4728. }
  4729. if (off_filter_mask &
  4730. IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO))
  4731. nla_size += rtnl_offload_xstats_get_size_hw_s_info(dev);
  4732. if (off_filter_mask &
  4733. IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_L3_STATS)) {
  4734. size = rtnl_offload_xstats_get_size_stats(dev, t_l3);
  4735. nla_size += nla_total_size_64bit(size);
  4736. }
  4737. if (nla_size != 0)
  4738. nla_size += nla_total_size(0);
  4739. return nla_size;
  4740. }
  4741. struct rtnl_stats_dump_filters {
  4742. /* mask[0] filters outer attributes. Then individual nests have their
  4743. * filtering mask at the index of the nested attribute.
  4744. */
  4745. u32 mask[IFLA_STATS_MAX + 1];
  4746. };
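/*
 * Build one RTM_NEWSTATS message for @dev. *idxattr and *prividx record the
 * outer attribute and the position inside it that were being filled when a
 * previous pass ran out of room, so a dump can resume mid-device.
 */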
  4747. static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
  4748. int type, u32 pid, u32 seq, u32 change,
  4749. unsigned int flags,
  4750. const struct rtnl_stats_dump_filters *filters,
  4751. int *idxattr, int *prividx,
  4752. struct netlink_ext_ack *extack)
  4753. {
  4754. unsigned int filter_mask = filters->mask[0];
  4755. struct if_stats_msg *ifsm;
  4756. struct nlmsghdr *nlh;
  4757. struct nlattr *attr;
  4758. int s_prividx = *prividx;
  4759. int err;
  4760. ASSERT_RTNL();
  4761. nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
  4762. if (!nlh)
  4763. return -EMSGSIZE;
  4764. ifsm = nlmsg_data(nlh);
  4765. ifsm->family = PF_UNSPEC;
  4766. ifsm->pad1 = 0;
  4767. ifsm->pad2 = 0;
  4768. ifsm->ifindex = dev->ifindex;
  4769. ifsm->filter_mask = filter_mask;
  4770. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
  4771. struct rtnl_link_stats64 *sp;
  4772. attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
  4773. sizeof(struct rtnl_link_stats64),
  4774. IFLA_STATS_UNSPEC);
  4775. if (!attr) {
  4776. err = -EMSGSIZE;
  4777. goto nla_put_failure;
  4778. }
  4779. sp = nla_data(attr);
  4780. dev_get_stats(dev, sp);
  4781. }
  4782. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
  4783. const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
  4784. if (ops && ops->fill_linkxstats) {
  4785. *idxattr = IFLA_STATS_LINK_XSTATS;
  4786. attr = nla_nest_start_noflag(skb,
  4787. IFLA_STATS_LINK_XSTATS);
  4788. if (!attr) {
  4789. err = -EMSGSIZE;
  4790. goto nla_put_failure;
  4791. }
  4792. err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
  4793. nla_nest_end(skb, attr);
  4794. if (err)
  4795. goto nla_put_failure;
  4796. *idxattr = 0;
  4797. }
  4798. }
  4799. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
  4800. *idxattr)) {
  4801. const struct rtnl_link_ops *ops = NULL;
  4802. const struct net_device *master;
  4803. master = netdev_master_upper_dev_get(dev);
  4804. if (master)
  4805. ops = master->rtnl_link_ops;
  4806. if (ops && ops->fill_linkxstats) {
  4807. *idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
  4808. attr = nla_nest_start_noflag(skb,
  4809. IFLA_STATS_LINK_XSTATS_SLAVE);
  4810. if (!attr) {
  4811. err = -EMSGSIZE;
  4812. goto nla_put_failure;
  4813. }
  4814. err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
  4815. nla_nest_end(skb, attr);
  4816. if (err)
  4817. goto nla_put_failure;
  4818. *idxattr = 0;
  4819. }
  4820. }
  4821. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
  4822. *idxattr)) {
  4823. u32 off_filter_mask;
  4824. off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
  4825. *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
  4826. attr = nla_nest_start_noflag(skb,
  4827. IFLA_STATS_LINK_OFFLOAD_XSTATS);
  4828. if (!attr) {
  4829. err = -EMSGSIZE;
  4830. goto nla_put_failure;
  4831. }
  4832. err = rtnl_offload_xstats_fill(skb, dev, prividx,
  4833. off_filter_mask, extack);
  4834. if (err == -ENODATA)
  4835. nla_nest_cancel(skb, attr);
  4836. else
  4837. nla_nest_end(skb, attr);
  4838. if (err && err != -ENODATA)
  4839. goto nla_put_failure;
  4840. *idxattr = 0;
  4841. }
  4842. if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
  4843. struct rtnl_af_ops *af_ops;
  4844. *idxattr = IFLA_STATS_AF_SPEC;
  4845. attr = nla_nest_start_noflag(skb, IFLA_STATS_AF_SPEC);
  4846. if (!attr) {
  4847. err = -EMSGSIZE;
  4848. goto nla_put_failure;
  4849. }
  4850. rcu_read_lock();
  4851. list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
  4852. if (af_ops->fill_stats_af) {
  4853. struct nlattr *af;
  4854. af = nla_nest_start_noflag(skb,
  4855. af_ops->family);
  4856. if (!af) {
  4857. rcu_read_unlock();
  4858. err = -EMSGSIZE;
  4859. goto nla_put_failure;
  4860. }
  4861. err = af_ops->fill_stats_af(skb, dev);
  4862. if (err == -ENODATA) {
  4863. nla_nest_cancel(skb, af);
  4864. } else if (err < 0) {
  4865. rcu_read_unlock();
  4866. goto nla_put_failure;
  4867. }
  4868. nla_nest_end(skb, af);
  4869. }
  4870. }
  4871. rcu_read_unlock();
  4872. nla_nest_end(skb, attr);
  4873. *idxattr = 0;
  4874. }
  4875. nlmsg_end(skb, nlh);
  4876. return 0;
  4877. nla_put_failure:
4878. /* not a multi message or no progress means a real error */
  4879. if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
  4880. nlmsg_cancel(skb, nlh);
  4881. else
  4882. nlmsg_end(skb, nlh);
  4883. return err;
  4884. }
  4885. static size_t if_nlmsg_stats_size(const struct net_device *dev,
  4886. const struct rtnl_stats_dump_filters *filters)
  4887. {
  4888. size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));
  4889. unsigned int filter_mask = filters->mask[0];
  4890. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
  4891. size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
  4892. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
  4893. const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
  4894. int attr = IFLA_STATS_LINK_XSTATS;
  4895. if (ops && ops->get_linkxstats_size) {
  4896. size += nla_total_size(ops->get_linkxstats_size(dev,
  4897. attr));
  4898. /* for IFLA_STATS_LINK_XSTATS */
  4899. size += nla_total_size(0);
  4900. }
  4901. }
  4902. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
  4903. struct net_device *_dev = (struct net_device *)dev;
  4904. const struct rtnl_link_ops *ops = NULL;
  4905. const struct net_device *master;
  4906. /* netdev_master_upper_dev_get can't take const */
  4907. master = netdev_master_upper_dev_get(_dev);
  4908. if (master)
  4909. ops = master->rtnl_link_ops;
  4910. if (ops && ops->get_linkxstats_size) {
  4911. int attr = IFLA_STATS_LINK_XSTATS_SLAVE;
  4912. size += nla_total_size(ops->get_linkxstats_size(dev,
  4913. attr));
  4914. /* for IFLA_STATS_LINK_XSTATS_SLAVE */
  4915. size += nla_total_size(0);
  4916. }
  4917. }
  4918. if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0)) {
  4919. u32 off_filter_mask;
  4920. off_filter_mask = filters->mask[IFLA_STATS_LINK_OFFLOAD_XSTATS];
  4921. size += rtnl_offload_xstats_get_size(dev, off_filter_mask);
  4922. }
  4923. if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
  4924. struct rtnl_af_ops *af_ops;
  4925. /* for IFLA_STATS_AF_SPEC */
  4926. size += nla_total_size(0);
  4927. rcu_read_lock();
  4928. list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
  4929. if (af_ops->get_stats_af_size) {
  4930. size += nla_total_size(
  4931. af_ops->get_stats_af_size(dev));
  4932. /* for AF_* */
  4933. size += nla_total_size(0);
  4934. }
  4935. }
  4936. rcu_read_unlock();
  4937. }
  4938. return size;
  4939. }
  4940. #define RTNL_STATS_OFFLOAD_XSTATS_VALID ((1 << __IFLA_OFFLOAD_XSTATS_MAX) - 1)
  4941. static const struct nla_policy
  4942. rtnl_stats_get_policy_filters[IFLA_STATS_MAX + 1] = {
  4943. [IFLA_STATS_LINK_OFFLOAD_XSTATS] =
  4944. NLA_POLICY_MASK(NLA_U32, RTNL_STATS_OFFLOAD_XSTATS_VALID),
  4945. };
  4946. static const struct nla_policy
  4947. rtnl_stats_get_policy[IFLA_STATS_GETSET_MAX + 1] = {
  4948. [IFLA_STATS_GET_FILTERS] =
  4949. NLA_POLICY_NESTED(rtnl_stats_get_policy_filters),
  4950. };
  4951. static const struct nla_policy
  4952. ifla_stats_set_policy[IFLA_STATS_GETSET_MAX + 1] = {
  4953. [IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS] = NLA_POLICY_MAX(NLA_U8, 1),
  4954. };
  4955. static int rtnl_stats_get_parse_filters(struct nlattr *ifla_filters,
  4956. struct rtnl_stats_dump_filters *filters,
  4957. struct netlink_ext_ack *extack)
  4958. {
  4959. struct nlattr *tb[IFLA_STATS_MAX + 1];
  4960. int err;
  4961. int at;
  4962. err = nla_parse_nested(tb, IFLA_STATS_MAX, ifla_filters,
  4963. rtnl_stats_get_policy_filters, extack);
  4964. if (err < 0)
  4965. return err;
  4966. for (at = 1; at <= IFLA_STATS_MAX; at++) {
  4967. if (tb[at]) {
  4968. if (!(filters->mask[0] & IFLA_STATS_FILTER_BIT(at))) {
  4969. NL_SET_ERR_MSG(extack, "Filtered attribute not enabled in filter_mask");
  4970. return -EINVAL;
  4971. }
  4972. filters->mask[at] = nla_get_u32(tb[at]);
  4973. }
  4974. }
  4975. return 0;
  4976. }
  4977. static int rtnl_stats_get_parse(const struct nlmsghdr *nlh,
  4978. u32 filter_mask,
  4979. struct rtnl_stats_dump_filters *filters,
  4980. struct netlink_ext_ack *extack)
  4981. {
  4982. struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
  4983. int err;
  4984. int i;
  4985. filters->mask[0] = filter_mask;
  4986. for (i = 1; i < ARRAY_SIZE(filters->mask); i++)
  4987. filters->mask[i] = -1U;
  4988. err = nlmsg_parse(nlh, sizeof(struct if_stats_msg), tb,
  4989. IFLA_STATS_GETSET_MAX, rtnl_stats_get_policy, extack);
  4990. if (err < 0)
  4991. return err;
  4992. if (tb[IFLA_STATS_GET_FILTERS]) {
  4993. err = rtnl_stats_get_parse_filters(tb[IFLA_STATS_GET_FILTERS],
  4994. filters, extack);
  4995. if (err)
  4996. return err;
  4997. }
  4998. return 0;
  4999. }
  5000. static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
  5001. bool is_dump, struct netlink_ext_ack *extack)
  5002. {
  5003. struct if_stats_msg *ifsm;
  5004. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
  5005. NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
  5006. return -EINVAL;
  5007. }
  5008. if (!strict_check)
  5009. return 0;
  5010. ifsm = nlmsg_data(nlh);
  5011. /* only requests using strict checks can pass data to influence
  5012. * the dump. The legacy exception is filter_mask.
  5013. */
  5014. if (ifsm->pad1 || ifsm->pad2 || (is_dump && ifsm->ifindex)) {
  5015. NL_SET_ERR_MSG(extack, "Invalid values in header for stats dump request");
  5016. return -EINVAL;
  5017. }
  5018. if (ifsm->filter_mask >= IFLA_STATS_FILTER_BIT(IFLA_STATS_MAX + 1)) {
  5019. NL_SET_ERR_MSG(extack, "Invalid stats requested through filter mask");
  5020. return -EINVAL;
  5021. }
  5022. return 0;
  5023. }
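/*
 * doit handler for RTM_GETSTATS: requires a valid ifindex and a non-zero
 * filter_mask, sizes the reply with if_nlmsg_stats_size() and unicasts the
 * resulting RTM_NEWSTATS message back to the requester.
 */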
  5024. static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
  5025. struct netlink_ext_ack *extack)
  5026. {
  5027. struct rtnl_stats_dump_filters filters;
  5028. struct net *net = sock_net(skb->sk);
  5029. struct net_device *dev = NULL;
  5030. int idxattr = 0, prividx = 0;
  5031. struct if_stats_msg *ifsm;
  5032. struct sk_buff *nskb;
  5033. int err;
  5034. err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
  5035. false, extack);
  5036. if (err)
  5037. return err;
  5038. ifsm = nlmsg_data(nlh);
  5039. if (ifsm->ifindex > 0)
  5040. dev = __dev_get_by_index(net, ifsm->ifindex);
  5041. else
  5042. return -EINVAL;
  5043. if (!dev)
  5044. return -ENODEV;
  5045. if (!ifsm->filter_mask) {
  5046. NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
  5047. return -EINVAL;
  5048. }
  5049. err = rtnl_stats_get_parse(nlh, ifsm->filter_mask, &filters, extack);
  5050. if (err)
  5051. return err;
  5052. nskb = nlmsg_new(if_nlmsg_stats_size(dev, &filters), GFP_KERNEL);
  5053. if (!nskb)
  5054. return -ENOBUFS;
  5055. err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
  5056. NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
  5057. 0, &filters, &idxattr, &prividx, extack);
  5058. if (err < 0) {
  5059. /* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
  5060. WARN_ON(err == -EMSGSIZE);
  5061. kfree_skb(nskb);
  5062. } else {
  5063. err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
  5064. }
  5065. return err;
  5066. }
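/*
 * RTM_GETSTATS dump: one RTM_NEWSTATS message per device, with the resume
 * state (ifindex, outer attribute, inner position) kept in cb->ctx so a
 * partially filled device can be continued in the next dump pass.
 */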
  5067. static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
  5068. {
  5069. struct netlink_ext_ack *extack = cb->extack;
  5070. struct rtnl_stats_dump_filters filters;
  5071. struct net *net = sock_net(skb->sk);
  5072. unsigned int flags = NLM_F_MULTI;
  5073. struct if_stats_msg *ifsm;
  5074. struct {
  5075. unsigned long ifindex;
  5076. int idxattr;
  5077. int prividx;
  5078. } *ctx = (void *)cb->ctx;
  5079. struct net_device *dev;
  5080. int err;
  5081. cb->seq = net->dev_base_seq;
  5082. err = rtnl_valid_stats_req(cb->nlh, cb->strict_check, true, extack);
  5083. if (err)
  5084. return err;
  5085. ifsm = nlmsg_data(cb->nlh);
  5086. if (!ifsm->filter_mask) {
  5087. NL_SET_ERR_MSG(extack, "Filter mask must be set for stats dump");
  5088. return -EINVAL;
  5089. }
  5090. err = rtnl_stats_get_parse(cb->nlh, ifsm->filter_mask, &filters,
  5091. extack);
  5092. if (err)
  5093. return err;
  5094. for_each_netdev_dump(net, dev, ctx->ifindex) {
  5095. err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
  5096. NETLINK_CB(cb->skb).portid,
  5097. cb->nlh->nlmsg_seq, 0,
  5098. flags, &filters,
  5099. &ctx->idxattr, &ctx->prividx,
  5100. extack);
  5101. /* If we ran out of room on the first message,
  5102. * we're in trouble.
  5103. */
  5104. WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
  5105. if (err < 0)
  5106. break;
  5107. ctx->prividx = 0;
  5108. ctx->idxattr = 0;
  5109. nl_dump_check_consistent(cb, nlmsg_hdr(skb));
  5110. }
  5111. return err;
  5112. }
  5113. void rtnl_offload_xstats_notify(struct net_device *dev)
  5114. {
  5115. struct rtnl_stats_dump_filters response_filters = {};
  5116. struct net *net = dev_net(dev);
  5117. int idxattr = 0, prividx = 0;
  5118. struct sk_buff *skb;
  5119. int err = -ENOBUFS;
  5120. ASSERT_RTNL();
  5121. response_filters.mask[0] |=
  5122. IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
  5123. response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
  5124. IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
  5125. skb = nlmsg_new(if_nlmsg_stats_size(dev, &response_filters),
  5126. GFP_KERNEL);
  5127. if (!skb)
  5128. goto errout;
  5129. err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS, 0, 0, 0, 0,
  5130. &response_filters, &idxattr, &prividx, NULL);
  5131. if (err < 0) {
  5132. kfree_skb(skb);
  5133. goto errout;
  5134. }
  5135. rtnl_notify(skb, net, 0, RTNLGRP_STATS, NULL, GFP_KERNEL);
  5136. return;
  5137. errout:
  5138. rtnl_set_sk_err(net, RTNLGRP_STATS, err);
  5139. }
  5140. EXPORT_SYMBOL(rtnl_offload_xstats_notify);
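/*
 * RTM_SETSTATS: the only per-request attribute accepted here is
 * IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, which enables or disables L3
 * hardware offload stats collection; on a state change an RTM_NEWSTATS
 * notification carrying the updated HW_S_INFO is sent.
 */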
  5141. static int rtnl_stats_set(struct sk_buff *skb, struct nlmsghdr *nlh,
  5142. struct netlink_ext_ack *extack)
  5143. {
  5144. enum netdev_offload_xstats_type t_l3 = NETDEV_OFFLOAD_XSTATS_TYPE_L3;
  5145. struct rtnl_stats_dump_filters response_filters = {};
  5146. struct nlattr *tb[IFLA_STATS_GETSET_MAX + 1];
  5147. struct net *net = sock_net(skb->sk);
  5148. struct net_device *dev = NULL;
  5149. struct if_stats_msg *ifsm;
  5150. bool notify = false;
  5151. int err;
  5152. err = rtnl_valid_stats_req(nlh, netlink_strict_get_check(skb),
  5153. false, extack);
  5154. if (err)
  5155. return err;
  5156. ifsm = nlmsg_data(nlh);
  5157. if (ifsm->family != AF_UNSPEC) {
  5158. NL_SET_ERR_MSG(extack, "Address family should be AF_UNSPEC");
  5159. return -EINVAL;
  5160. }
  5161. if (ifsm->ifindex > 0)
  5162. dev = __dev_get_by_index(net, ifsm->ifindex);
  5163. else
  5164. return -EINVAL;
  5165. if (!dev)
  5166. return -ENODEV;
  5167. if (ifsm->filter_mask) {
  5168. NL_SET_ERR_MSG(extack, "Filter mask must be 0 for stats set");
  5169. return -EINVAL;
  5170. }
  5171. err = nlmsg_parse(nlh, sizeof(*ifsm), tb, IFLA_STATS_GETSET_MAX,
  5172. ifla_stats_set_policy, extack);
  5173. if (err < 0)
  5174. return err;
  5175. if (tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]) {
  5176. u8 req = nla_get_u8(tb[IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS]);
  5177. if (req)
  5178. err = netdev_offload_xstats_enable(dev, t_l3, extack);
  5179. else
  5180. err = netdev_offload_xstats_disable(dev, t_l3);
  5181. if (!err)
  5182. notify = true;
  5183. else if (err != -EALREADY)
  5184. return err;
  5185. response_filters.mask[0] |=
  5186. IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_OFFLOAD_XSTATS);
  5187. response_filters.mask[IFLA_STATS_LINK_OFFLOAD_XSTATS] |=
  5188. IFLA_STATS_FILTER_BIT(IFLA_OFFLOAD_XSTATS_HW_S_INFO);
  5189. }
  5190. if (notify)
  5191. rtnl_offload_xstats_notify(dev);
  5192. return 0;
  5193. }
  5194. static int rtnl_mdb_valid_dump_req(const struct nlmsghdr *nlh,
  5195. struct netlink_ext_ack *extack)
  5196. {
  5197. struct br_port_msg *bpm;
  5198. if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
  5199. NL_SET_ERR_MSG(extack, "Invalid header for mdb dump request");
  5200. return -EINVAL;
  5201. }
  5202. bpm = nlmsg_data(nlh);
  5203. if (bpm->ifindex) {
  5204. NL_SET_ERR_MSG(extack, "Filtering by device index is not supported for mdb dump request");
  5205. return -EINVAL;
  5206. }
  5207. if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
  5208. NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
  5209. return -EINVAL;
  5210. }
  5211. return 0;
  5212. }
  5213. struct rtnl_mdb_dump_ctx {
  5214. long idx;
  5215. };
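/*
 * RTM_GETMDB dump: iterate over all devices that implement ndo_mdb_dump,
 * resetting the per-device dump context and sequence counters before moving
 * on to the next device, and stopping early on -EMSGSIZE.
 */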
  5216. static int rtnl_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
  5217. {
  5218. struct rtnl_mdb_dump_ctx *ctx = (void *)cb->ctx;
  5219. struct net *net = sock_net(skb->sk);
  5220. struct net_device *dev;
  5221. int idx, s_idx;
  5222. int err;
  5223. NL_ASSERT_DUMP_CTX_FITS(struct rtnl_mdb_dump_ctx);
  5224. if (cb->strict_check) {
  5225. err = rtnl_mdb_valid_dump_req(cb->nlh, cb->extack);
  5226. if (err)
  5227. return err;
  5228. }
  5229. s_idx = ctx->idx;
  5230. idx = 0;
  5231. for_each_netdev(net, dev) {
  5232. if (idx < s_idx)
  5233. goto skip;
  5234. if (!dev->netdev_ops->ndo_mdb_dump)
  5235. goto skip;
  5236. err = dev->netdev_ops->ndo_mdb_dump(dev, skb, cb);
  5237. if (err == -EMSGSIZE)
  5238. goto out;
5239. /* Moving on to the next device, reset markers and sequence
5240. * counters since they are all maintained per-device.
5241. */
  5242. memset(cb->ctx, 0, sizeof(cb->ctx));
  5243. cb->prev_seq = 0;
  5244. cb->seq = 0;
  5245. skip:
  5246. idx++;
  5247. }
  5248. out:
  5249. ctx->idx = idx;
  5250. return skb->len;
  5251. }
  5252. static int rtnl_validate_mdb_entry_get(const struct nlattr *attr,
  5253. struct netlink_ext_ack *extack)
  5254. {
  5255. struct br_mdb_entry *entry = nla_data(attr);
  5256. if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
  5257. NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
  5258. return -EINVAL;
  5259. }
  5260. if (entry->ifindex) {
  5261. NL_SET_ERR_MSG(extack, "Entry ifindex cannot be specified");
  5262. return -EINVAL;
  5263. }
  5264. if (entry->state) {
  5265. NL_SET_ERR_MSG(extack, "Entry state cannot be specified");
  5266. return -EINVAL;
  5267. }
  5268. if (entry->flags) {
  5269. NL_SET_ERR_MSG(extack, "Entry flags cannot be specified");
  5270. return -EINVAL;
  5271. }
  5272. if (entry->vid >= VLAN_VID_MASK) {
  5273. NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
  5274. return -EINVAL;
  5275. }
  5276. if (entry->addr.proto != htons(ETH_P_IP) &&
  5277. entry->addr.proto != htons(ETH_P_IPV6) &&
  5278. entry->addr.proto != 0) {
  5279. NL_SET_ERR_MSG(extack, "Unknown entry protocol");
  5280. return -EINVAL;
  5281. }
  5282. return 0;
  5283. }
  5284. static const struct nla_policy mdba_get_policy[MDBA_GET_ENTRY_MAX + 1] = {
  5285. [MDBA_GET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
  5286. rtnl_validate_mdb_entry_get,
  5287. sizeof(struct br_mdb_entry)),
  5288. [MDBA_GET_ENTRY_ATTRS] = { .type = NLA_NESTED },
  5289. };
  5290. static int rtnl_mdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
  5291. struct netlink_ext_ack *extack)
  5292. {
  5293. struct nlattr *tb[MDBA_GET_ENTRY_MAX + 1];
  5294. struct net *net = sock_net(in_skb->sk);
  5295. struct br_port_msg *bpm;
  5296. struct net_device *dev;
  5297. int err;
  5298. err = nlmsg_parse(nlh, sizeof(struct br_port_msg), tb,
  5299. MDBA_GET_ENTRY_MAX, mdba_get_policy, extack);
  5300. if (err)
  5301. return err;
  5302. bpm = nlmsg_data(nlh);
  5303. if (!bpm->ifindex) {
  5304. NL_SET_ERR_MSG(extack, "Invalid ifindex");
  5305. return -EINVAL;
  5306. }
  5307. dev = __dev_get_by_index(net, bpm->ifindex);
  5308. if (!dev) {
  5309. NL_SET_ERR_MSG(extack, "Device doesn't exist");
  5310. return -ENODEV;
  5311. }
  5312. if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_GET_ENTRY)) {
  5313. NL_SET_ERR_MSG(extack, "Missing MDBA_GET_ENTRY attribute");
  5314. return -EINVAL;
  5315. }
  5316. if (!dev->netdev_ops->ndo_mdb_get) {
  5317. NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
  5318. return -EOPNOTSUPP;
  5319. }
  5320. return dev->netdev_ops->ndo_mdb_get(dev, tb, NETLINK_CB(in_skb).portid,
  5321. nlh->nlmsg_seq, extack);
  5322. }
  5323. static int rtnl_validate_mdb_entry(const struct nlattr *attr,
  5324. struct netlink_ext_ack *extack)
  5325. {
  5326. struct br_mdb_entry *entry = nla_data(attr);
  5327. if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
  5328. NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
  5329. return -EINVAL;
  5330. }
  5331. if (entry->ifindex == 0) {
  5332. NL_SET_ERR_MSG(extack, "Zero entry ifindex is not allowed");
  5333. return -EINVAL;
  5334. }
  5335. if (entry->addr.proto == htons(ETH_P_IP)) {
  5336. if (!ipv4_is_multicast(entry->addr.u.ip4) &&
  5337. !ipv4_is_zeronet(entry->addr.u.ip4)) {
  5338. NL_SET_ERR_MSG(extack, "IPv4 entry group address is not multicast or 0.0.0.0");
  5339. return -EINVAL;
  5340. }
  5341. if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
  5342. NL_SET_ERR_MSG(extack, "IPv4 entry group address is local multicast");
  5343. return -EINVAL;
  5344. }
  5345. #if IS_ENABLED(CONFIG_IPV6)
  5346. } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
  5347. if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
  5348. NL_SET_ERR_MSG(extack, "IPv6 entry group address is link-local all nodes");
  5349. return -EINVAL;
  5350. }
  5351. #endif
  5352. } else if (entry->addr.proto == 0) {
  5353. /* L2 mdb */
  5354. if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
  5355. NL_SET_ERR_MSG(extack, "L2 entry group is not multicast");
  5356. return -EINVAL;
  5357. }
  5358. } else {
  5359. NL_SET_ERR_MSG(extack, "Unknown entry protocol");
  5360. return -EINVAL;
  5361. }
  5362. if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
  5363. NL_SET_ERR_MSG(extack, "Unknown entry state");
  5364. return -EINVAL;
  5365. }
  5366. if (entry->vid >= VLAN_VID_MASK) {
  5367. NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
  5368. return -EINVAL;
  5369. }
  5370. return 0;
  5371. }
  5372. static const struct nla_policy mdba_policy[MDBA_SET_ENTRY_MAX + 1] = {
  5373. [MDBA_SET_ENTRY_UNSPEC] = { .strict_start_type = MDBA_SET_ENTRY_ATTRS + 1 },
  5374. [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
  5375. rtnl_validate_mdb_entry,
  5376. sizeof(struct br_mdb_entry)),
  5377. [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
  5378. };
  5379. static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
  5380. struct netlink_ext_ack *extack)
  5381. {
  5382. struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
  5383. struct net *net = sock_net(skb->sk);
  5384. struct br_port_msg *bpm;
  5385. struct net_device *dev;
  5386. int err;
  5387. err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
  5388. MDBA_SET_ENTRY_MAX, mdba_policy, extack);
  5389. if (err)
  5390. return err;
  5391. bpm = nlmsg_data(nlh);
  5392. if (!bpm->ifindex) {
  5393. NL_SET_ERR_MSG(extack, "Invalid ifindex");
  5394. return -EINVAL;
  5395. }
  5396. dev = __dev_get_by_index(net, bpm->ifindex);
  5397. if (!dev) {
  5398. NL_SET_ERR_MSG(extack, "Device doesn't exist");
  5399. return -ENODEV;
  5400. }
  5401. if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
  5402. NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
  5403. return -EINVAL;
  5404. }
  5405. if (!dev->netdev_ops->ndo_mdb_add) {
  5406. NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
  5407. return -EOPNOTSUPP;
  5408. }
  5409. return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
  5410. }
  5411. static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
  5412. struct netlink_ext_ack *extack)
  5413. {
  5414. struct br_mdb_entry *entry = nla_data(attr);
  5415. struct br_mdb_entry zero_entry = {};
  5416. if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
  5417. NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
  5418. return -EINVAL;
  5419. }
  5420. if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
  5421. NL_SET_ERR_MSG(extack, "Unknown entry state");
  5422. return -EINVAL;
  5423. }
  5424. if (entry->flags) {
  5425. NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
  5426. return -EINVAL;
  5427. }
  5428. if (entry->vid >= VLAN_N_VID - 1) {
  5429. NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
  5430. return -EINVAL;
  5431. }
  5432. if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
  5433. NL_SET_ERR_MSG(extack, "Entry address cannot be set");
  5434. return -EINVAL;
  5435. }
  5436. return 0;
  5437. }
  5438. static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
  5439. [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
  5440. rtnl_validate_mdb_entry_del_bulk,
  5441. sizeof(struct br_mdb_entry)),
  5442. [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
  5443. };
  5444. static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
  5445. struct netlink_ext_ack *extack)
  5446. {
  5447. bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
  5448. struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
  5449. struct net *net = sock_net(skb->sk);
  5450. struct br_port_msg *bpm;
  5451. struct net_device *dev;
  5452. int err;
  5453. if (!del_bulk)
  5454. err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
  5455. MDBA_SET_ENTRY_MAX, mdba_policy,
  5456. extack);
  5457. else
  5458. err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
  5459. mdba_del_bulk_policy, extack);
  5460. if (err)
  5461. return err;
  5462. bpm = nlmsg_data(nlh);
  5463. if (!bpm->ifindex) {
  5464. NL_SET_ERR_MSG(extack, "Invalid ifindex");
  5465. return -EINVAL;
  5466. }
  5467. dev = __dev_get_by_index(net, bpm->ifindex);
  5468. if (!dev) {
  5469. NL_SET_ERR_MSG(extack, "Device doesn't exist");
  5470. return -ENODEV;
  5471. }
  5472. if (NL_REQ_ATTR_CHECK(extack, NULL, tb, MDBA_SET_ENTRY)) {
  5473. NL_SET_ERR_MSG(extack, "Missing MDBA_SET_ENTRY attribute");
  5474. return -EINVAL;
  5475. }
  5476. if (del_bulk) {
  5477. if (!dev->netdev_ops->ndo_mdb_del_bulk) {
  5478. NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
  5479. return -EOPNOTSUPP;
  5480. }
  5481. return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
  5482. }
  5483. if (!dev->netdev_ops->ndo_mdb_del) {
  5484. NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
  5485. return -EOPNOTSUPP;
  5486. }
  5487. return dev->netdev_ops->ndo_mdb_del(dev, tb, extack);
  5488. }
  5489. /* Process one rtnetlink message. */
  5490. static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
  5491. {
  5492. const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED);
  5493. rtnl_dumpit_func dumpit = cb->data;
  5494. int err;
5495. /* The previous iteration has already finished; avoid calling ->dumpit()
5496. * again, it may not expect to be called after it reached the end.
5497. */
  5498. if (!dumpit)
  5499. return 0;
  5500. if (needs_lock)
  5501. rtnl_lock();
  5502. err = dumpit(skb, cb);
  5503. if (needs_lock)
  5504. rtnl_unlock();
5505. /* Old dump handlers used to send NLM_DONE in a separate recvmsg() response.
  5506. * Some applications which parse netlink manually depend on this.
  5507. */
  5508. if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
  5509. if (err < 0 && err != -EMSGSIZE)
  5510. return err;
  5511. if (!err)
  5512. cb->data = NULL;
  5513. return skb->len;
  5514. }
  5515. return err;
  5516. }
  5517. static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
  5518. const struct nlmsghdr *nlh,
  5519. struct netlink_dump_control *control)
  5520. {
  5521. if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE ||
  5522. !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) {
  5523. WARN_ON(control->data);
  5524. control->data = control->dump;
  5525. control->dump = rtnl_dumpit;
  5526. }
  5527. return netlink_dump_start(ssk, skb, nlh, control);
  5528. }
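/*
 * Dispatch one rtnetlink request: look up the handler registered for the
 * (family, message type) pair, falling back to PF_UNSPEC, require
 * CAP_NET_ADMIN for anything but GET requests, start a dump for
 * GET + NLM_F_DUMP, and otherwise run the doit callback, taking the RTNL
 * lock unless the handler is flagged RTNL_FLAG_DOIT_UNLOCKED.
 */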
  5529. static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
  5530. struct netlink_ext_ack *extack)
  5531. {
  5532. struct net *net = sock_net(skb->sk);
  5533. struct rtnl_link *link;
  5534. enum rtnl_kinds kind;
  5535. struct module *owner;
  5536. int err = -EOPNOTSUPP;
  5537. rtnl_doit_func doit;
  5538. unsigned int flags;
  5539. int family;
  5540. int type;
  5541. type = nlh->nlmsg_type;
  5542. if (type > RTM_MAX)
  5543. return -EOPNOTSUPP;
  5544. type -= RTM_BASE;
5545. /* All messages must be at least 1 byte long */
  5546. if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
  5547. return 0;
  5548. family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
  5549. kind = rtnl_msgtype_kind(type);
  5550. if (kind != RTNL_KIND_GET && !netlink_net_capable(skb, CAP_NET_ADMIN))
  5551. return -EPERM;
  5552. rcu_read_lock();
  5553. if (kind == RTNL_KIND_GET && (nlh->nlmsg_flags & NLM_F_DUMP)) {
  5554. struct sock *rtnl;
  5555. rtnl_dumpit_func dumpit;
  5556. u32 min_dump_alloc = 0;
  5557. link = rtnl_get_link(family, type);
  5558. if (!link || !link->dumpit) {
  5559. family = PF_UNSPEC;
  5560. link = rtnl_get_link(family, type);
  5561. if (!link || !link->dumpit)
  5562. goto err_unlock;
  5563. }
  5564. owner = link->owner;
  5565. dumpit = link->dumpit;
  5566. flags = link->flags;
  5567. if (type == RTM_GETLINK - RTM_BASE)
  5568. min_dump_alloc = rtnl_calcit(skb, nlh);
  5569. err = 0;
  5570. /* need to do this before rcu_read_unlock() */
  5571. if (!try_module_get(owner))
  5572. err = -EPROTONOSUPPORT;
  5573. rcu_read_unlock();
  5574. rtnl = net->rtnl;
  5575. if (err == 0) {
  5576. struct netlink_dump_control c = {
  5577. .dump = dumpit,
  5578. .min_dump_alloc = min_dump_alloc,
  5579. .module = owner,
  5580. .flags = flags,
  5581. };
  5582. err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
5583. /* netlink_dump_start() will keep a reference on the
5584. * module if the dump is still in progress.
5585. */
  5586. module_put(owner);
  5587. }
  5588. return err;
  5589. }
  5590. link = rtnl_get_link(family, type);
  5591. if (!link || !link->doit) {
  5592. family = PF_UNSPEC;
  5593. link = rtnl_get_link(PF_UNSPEC, type);
  5594. if (!link || !link->doit)
  5595. goto out_unlock;
  5596. }
  5597. owner = link->owner;
  5598. if (!try_module_get(owner)) {
  5599. err = -EPROTONOSUPPORT;
  5600. goto out_unlock;
  5601. }
  5602. flags = link->flags;
  5603. if (kind == RTNL_KIND_DEL && (nlh->nlmsg_flags & NLM_F_BULK) &&
  5604. !(flags & RTNL_FLAG_BULK_DEL_SUPPORTED)) {
  5605. NL_SET_ERR_MSG(extack, "Bulk delete is not supported");
  5606. module_put(owner);
  5607. goto err_unlock;
  5608. }
  5609. if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
  5610. doit = link->doit;
  5611. rcu_read_unlock();
  5612. if (doit)
  5613. err = doit(skb, nlh, extack);
  5614. module_put(owner);
  5615. return err;
  5616. }
  5617. rcu_read_unlock();
  5618. rtnl_lock();
  5619. link = rtnl_get_link(family, type);
  5620. if (link && link->doit)
  5621. err = link->doit(skb, nlh, extack);
  5622. rtnl_unlock();
  5623. module_put(owner);
  5624. return err;
  5625. out_unlock:
  5626. rcu_read_unlock();
  5627. return err;
  5628. err_unlock:
  5629. rcu_read_unlock();
  5630. return -EOPNOTSUPP;
  5631. }
  5632. static void rtnetlink_rcv(struct sk_buff *skb)
  5633. {
  5634. netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
  5635. }
  5636. static int rtnetlink_bind(struct net *net, int group)
  5637. {
  5638. switch (group) {
  5639. case RTNLGRP_IPV4_MROUTE_R:
  5640. case RTNLGRP_IPV6_MROUTE_R:
  5641. if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
  5642. return -EPERM;
  5643. break;
  5644. }
  5645. return 0;
  5646. }
  5647. static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
  5648. {
  5649. struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  5650. switch (event) {
  5651. case NETDEV_REBOOT:
  5652. case NETDEV_CHANGEMTU:
  5653. case NETDEV_CHANGEADDR:
  5654. case NETDEV_CHANGENAME:
  5655. case NETDEV_FEAT_CHANGE:
  5656. case NETDEV_BONDING_FAILOVER:
  5657. case NETDEV_POST_TYPE_CHANGE:
  5658. case NETDEV_NOTIFY_PEERS:
  5659. case NETDEV_CHANGEUPPER:
  5660. case NETDEV_RESEND_IGMP:
  5661. case NETDEV_CHANGEINFODATA:
  5662. case NETDEV_CHANGELOWERSTATE:
  5663. case NETDEV_CHANGE_TX_QUEUE_LEN:
  5664. rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
  5665. GFP_KERNEL, NULL, 0, 0, NULL);
  5666. break;
  5667. default:
  5668. break;
  5669. }
  5670. return NOTIFY_DONE;
  5671. }
  5672. static struct notifier_block rtnetlink_dev_notifier = {
  5673. .notifier_call = rtnetlink_event,
  5674. };
  5675. static int __net_init rtnetlink_net_init(struct net *net)
  5676. {
  5677. struct sock *sk;
  5678. struct netlink_kernel_cfg cfg = {
  5679. .groups = RTNLGRP_MAX,
  5680. .input = rtnetlink_rcv,
  5681. .flags = NL_CFG_F_NONROOT_RECV,
  5682. .bind = rtnetlink_bind,
  5683. };
  5684. sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
  5685. if (!sk)
  5686. return -ENOMEM;
  5687. net->rtnl = sk;
  5688. return 0;
  5689. }
  5690. static void __net_exit rtnetlink_net_exit(struct net *net)
  5691. {
  5692. netlink_kernel_release(net->rtnl);
  5693. net->rtnl = NULL;
  5694. }
  5695. static struct pernet_operations rtnetlink_net_ops = {
  5696. .init = rtnetlink_net_init,
  5697. .exit = rtnetlink_net_exit,
  5698. };
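/*
 * Subsystem init: register the pernet operations that create the
 * per-namespace NETLINK_ROUTE kernel socket, hook up the netdevice notifier
 * that turns device events into RTM_NEWLINK notifications, and register the
 * doit/dumpit handlers for the rtnetlink message types handled in this file.
 */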
  5699. void __init rtnetlink_init(void)
  5700. {
  5701. if (register_pernet_subsys(&rtnetlink_net_ops))
  5702. panic("rtnetlink_init: cannot initialize rtnetlink\n");
  5703. register_netdevice_notifier(&rtnetlink_dev_notifier);
  5704. rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
  5705. rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
  5706. rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
  5707. rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
  5708. rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
  5709. rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
  5710. rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
  5711. rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);
  5712. rtnl_register(PF_UNSPEC, RTM_NEWLINKPROP, rtnl_newlinkprop, NULL, 0);
  5713. rtnl_register(PF_UNSPEC, RTM_DELLINKPROP, rtnl_dellinkprop, NULL, 0);
  5714. rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
  5715. rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL,
  5716. RTNL_FLAG_BULK_DEL_SUPPORTED);
  5717. rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
  5718. rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
  5719. rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
  5720. rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);
  5721. rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
  5722. 0);
  5723. rtnl_register(PF_UNSPEC, RTM_SETSTATS, rtnl_stats_set, NULL, 0);
  5724. rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
  5725. rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
  5726. rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
  5727. RTNL_FLAG_BULK_DEL_SUPPORTED);
  5728. }