trace_events_hist.c 172 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
7677867796780678167826783678467856786678767886789679067916792679367946795679667976798679968006801680268036804680568066807680868096810681168126813681468156816681768186819682068216822682368246825682668276828682968306831683268336834683568366837683868396840684168426843684468456846684768486849685068516852685368546855685668576858685968606861686268636864686568666867686868696870687168726873687468756876687768786879688068816882688368846885688668876888688968906891689268936894689568966897689868996900690169026903690469056906690769086909691069116912691369146915691669176918691969206921692269236924692569266927692869296930693169326933693469356936693769386939694069416942694369446945694669476948694969506951695269536954695569566957695869596960696169626963696469656966696769686969697069716972697369746975697669776978697969806981698269836984698569866987698869896990699169926993699469956996699769986999700070017002700370047005700670077008700970107011701270137014701570167017701870197020702170227023702470257026702770287029703070317032703370347035
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * trace_events_hist - trace event hist triggers
  4. *
  5. * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
  6. */
  7. #include <linux/module.h>
  8. #include <linux/kallsyms.h>
  9. #include <linux/security.h>
  10. #include <linux/mutex.h>
  11. #include <linux/slab.h>
  12. #include <linux/stacktrace.h>
  13. #include <linux/rculist.h>
  14. #include <linux/tracefs.h>
  15. /* for gfp flag names */
  16. #include <linux/trace_events.h>
  17. #include <trace/events/mmflags.h>
  18. #include "tracing_map.h"
  19. #include "trace_synth.h"
  20. #define ERRORS \
  21. C(NONE, "No error"), \
  22. C(DUPLICATE_VAR, "Variable already defined"), \
  23. C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
  24. C(TOO_MANY_VARS, "Too many variables defined"), \
  25. C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
  26. C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
  27. C(TRIGGER_EEXIST, "Hist trigger already exists"), \
  28. C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
  29. C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
  30. C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
  31. C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
  32. C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
  33. C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
  34. C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
  35. C(HIST_NOT_FOUND, "Matching event histogram not found"), \
  36. C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
  37. C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
  38. C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
  39. C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
  40. C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
  41. C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
  42. C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
  43. C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
  44. C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
  45. C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
  46. C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
  47. C(TOO_MANY_PARAMS, "Too many action params"), \
  48. C(PARAM_NOT_FOUND, "Couldn't find param"), \
  49. C(INVALID_PARAM, "Invalid action param"), \
  50. C(ACTION_NOT_FOUND, "No action found"), \
  51. C(NO_SAVE_PARAMS, "No params found for save()"), \
  52. C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
  53. C(ACTION_MISMATCH, "Handler doesn't support action"), \
  54. C(NO_CLOSING_PAREN, "No closing paren found"), \
  55. C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
  56. C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
  57. C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
  58. C(VAR_NOT_FOUND, "Couldn't find variable"), \
  59. C(FIELD_NOT_FOUND, "Couldn't find field"), \
  60. C(EMPTY_ASSIGNMENT, "Empty assignment"), \
  61. C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
  62. C(EMPTY_SORT_FIELD, "Empty sort field"), \
  63. C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
  64. C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
  65. C(INVALID_STR_OPERAND, "String type can not be an operand in expression"), \
  66. C(EXPECT_NUMBER, "Expecting numeric literal"), \
  67. C(UNARY_MINUS_SUBEXPR, "Unary minus not supported in sub-expressions"), \
  68. C(DIVISION_BY_ZERO, "Division by zero"), \
  69. C(NEED_NOHC_VAL, "Non-hitcount value is required for 'nohitcount'"),
  70. #undef C
  71. #define C(a, b) HIST_ERR_##a
  72. enum { ERRORS };
  73. #undef C
  74. #define C(a, b) b
  75. static const char *err_text[] = { ERRORS };
  76. struct hist_field;
  77. typedef u64 (*hist_field_fn_t) (struct hist_field *field,
  78. struct tracing_map_elt *elt,
  79. struct trace_buffer *buffer,
  80. struct ring_buffer_event *rbe,
  81. void *event);
  82. #define HIST_FIELD_OPERANDS_MAX 2
  83. #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
  84. #define HIST_ACTIONS_MAX 8
  85. #define HIST_CONST_DIGITS_MAX 21
  86. #define HIST_DIV_SHIFT 20 /* For optimizing division by constants */
  87. enum field_op_id {
  88. FIELD_OP_NONE,
  89. FIELD_OP_PLUS,
  90. FIELD_OP_MINUS,
  91. FIELD_OP_UNARY_MINUS,
  92. FIELD_OP_DIV,
  93. FIELD_OP_MULT,
  94. };
/*
 * Identifies which resolution function a hist_field uses.  Stored as
 * an enum (hist_field->fn_num) rather than a function pointer and
 * dispatched via hist_fn_call().
 */
enum hist_field_fn {
	HIST_FIELD_FN_NOP,
	HIST_FIELD_FN_VAR_REF,
	HIST_FIELD_FN_COUNTER,
	HIST_FIELD_FN_CONST,
	HIST_FIELD_FN_LOG2,
	HIST_FIELD_FN_BUCKET,
	HIST_FIELD_FN_TIMESTAMP,
	HIST_FIELD_FN_CPU,
	HIST_FIELD_FN_STRING,
	HIST_FIELD_FN_DYNSTRING,
	HIST_FIELD_FN_RELDYNSTRING,
	HIST_FIELD_FN_PSTRING,
	HIST_FIELD_FN_S64,
	HIST_FIELD_FN_U64,
	HIST_FIELD_FN_S32,
	HIST_FIELD_FN_U32,
	HIST_FIELD_FN_S16,
	HIST_FIELD_FN_U16,
	HIST_FIELD_FN_S8,
	HIST_FIELD_FN_U8,
	HIST_FIELD_FN_UMINUS,
	HIST_FIELD_FN_MINUS,
	HIST_FIELD_FN_PLUS,
	HIST_FIELD_FN_DIV,
	HIST_FIELD_FN_MULT,
	HIST_FIELD_FN_DIV_POWER2,
	HIST_FIELD_FN_DIV_NOT_POWER2,
	HIST_FIELD_FN_DIV_MULT_SHIFT,
	HIST_FIELD_FN_EXECNAME,
	HIST_FIELD_FN_STACK,
};
/*
 * A hist_var (histogram variable) contains variable information for
 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
 * flag set.  A hist_var has a variable name e.g. ts0, and is
 * associated with a given histogram trigger, as specified by
 * hist_data.  The hist_var idx is the unique index assigned to
 * the variable by the hist trigger's tracing_map.  The idx is what
 * is used to set a variable's value and, by a variable reference, to
 * retrieve it.
 */
struct hist_var {
	char				*name;
	struct hist_trigger_data	*hist_data;
	unsigned int			idx;
};
/*
 * A hist_field describes one key, value, variable or expression of a
 * histogram trigger.  Expression fields link to their operands via
 * operands[]; how the runtime value is produced is selected by fn_num.
 */
struct hist_field {
	struct ftrace_event_field	*field;
	unsigned long			flags;		/* HIST_FIELD_FL_* */
	unsigned long			buckets;	/* bucket size for .buckets modifier */
	const char			*type;
	struct hist_field		*operands[HIST_FIELD_OPERANDS_MAX];
	struct hist_trigger_data	*hist_data;
	enum hist_field_fn		fn_num;
	unsigned int			ref;		/* reference count */
	unsigned int			size;
	unsigned int			offset;
	unsigned int			is_signed;

	/*
	 * Variable fields contain variable-specific info in var.
	 */
	struct hist_var			var;
	enum field_op_id		operator;
	char				*system;
	char				*event_name;

	/*
	 * The name field is used for EXPR and VAR_REF fields.  VAR
	 * fields contain the variable name in var.name.
	 */
	char				*name;

	 /*
	  * When a histogram trigger is hit, if it has any references
	  * to variables, the values of those variables are collected
	  * into a var_ref_vals array by resolve_var_refs().  The
	  * current value of each variable is read from the tracing_map
	  * using the hist field's hist_var.idx and entered into the
	  * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
	  */
	unsigned int			var_ref_idx;
	bool				read_once;

	unsigned int			var_str_idx;

	/* Numeric literals are represented as u64 */
	u64				constant;
	/* Used to optimize division by constants */
	u64				div_multiplier;
};
/* Dispatch a hist_field's resolution function (selected by fn_num). */
static u64 hist_fn_call(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct trace_buffer *buffer,
			struct ring_buffer_event *rbe,
			void *event);
  187. static u64 hist_field_const(struct hist_field *field,
  188. struct tracing_map_elt *elt,
  189. struct trace_buffer *buffer,
  190. struct ring_buffer_event *rbe,
  191. void *event)
  192. {
  193. return field->constant;
  194. }
  195. static u64 hist_field_counter(struct hist_field *field,
  196. struct tracing_map_elt *elt,
  197. struct trace_buffer *buffer,
  198. struct ring_buffer_event *rbe,
  199. void *event)
  200. {
  201. return 1;
  202. }
  203. static u64 hist_field_string(struct hist_field *hist_field,
  204. struct tracing_map_elt *elt,
  205. struct trace_buffer *buffer,
  206. struct ring_buffer_event *rbe,
  207. void *event)
  208. {
  209. char *addr = (char *)(event + hist_field->field->offset);
  210. return (u64)(unsigned long)addr;
  211. }
  212. static u64 hist_field_dynstring(struct hist_field *hist_field,
  213. struct tracing_map_elt *elt,
  214. struct trace_buffer *buffer,
  215. struct ring_buffer_event *rbe,
  216. void *event)
  217. {
  218. u32 str_item = *(u32 *)(event + hist_field->field->offset);
  219. int str_loc = str_item & 0xffff;
  220. char *addr = (char *)(event + str_loc);
  221. return (u64)(unsigned long)addr;
  222. }
  223. static u64 hist_field_reldynstring(struct hist_field *hist_field,
  224. struct tracing_map_elt *elt,
  225. struct trace_buffer *buffer,
  226. struct ring_buffer_event *rbe,
  227. void *event)
  228. {
  229. u32 *item = event + hist_field->field->offset;
  230. u32 str_item = *item;
  231. int str_loc = str_item & 0xffff;
  232. char *addr = (char *)&item[1] + str_loc;
  233. return (u64)(unsigned long)addr;
  234. }
  235. static u64 hist_field_pstring(struct hist_field *hist_field,
  236. struct tracing_map_elt *elt,
  237. struct trace_buffer *buffer,
  238. struct ring_buffer_event *rbe,
  239. void *event)
  240. {
  241. char **addr = (char **)(event + hist_field->field->offset);
  242. return (u64)(unsigned long)*addr;
  243. }
  244. static u64 hist_field_log2(struct hist_field *hist_field,
  245. struct tracing_map_elt *elt,
  246. struct trace_buffer *buffer,
  247. struct ring_buffer_event *rbe,
  248. void *event)
  249. {
  250. struct hist_field *operand = hist_field->operands[0];
  251. u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
  252. return (u64) ilog2(roundup_pow_of_two(val));
  253. }
  254. static u64 hist_field_bucket(struct hist_field *hist_field,
  255. struct tracing_map_elt *elt,
  256. struct trace_buffer *buffer,
  257. struct ring_buffer_event *rbe,
  258. void *event)
  259. {
  260. struct hist_field *operand = hist_field->operands[0];
  261. unsigned long buckets = hist_field->buckets;
  262. u64 val = hist_fn_call(operand, elt, buffer, rbe, event);
  263. if (WARN_ON_ONCE(!buckets))
  264. return val;
  265. if (val >= LONG_MAX)
  266. val = div64_ul(val, buckets);
  267. else
  268. val = (u64)((unsigned long)val / buckets);
  269. return val * buckets;
  270. }
  271. static u64 hist_field_plus(struct hist_field *hist_field,
  272. struct tracing_map_elt *elt,
  273. struct trace_buffer *buffer,
  274. struct ring_buffer_event *rbe,
  275. void *event)
  276. {
  277. struct hist_field *operand1 = hist_field->operands[0];
  278. struct hist_field *operand2 = hist_field->operands[1];
  279. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  280. u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
  281. return val1 + val2;
  282. }
  283. static u64 hist_field_minus(struct hist_field *hist_field,
  284. struct tracing_map_elt *elt,
  285. struct trace_buffer *buffer,
  286. struct ring_buffer_event *rbe,
  287. void *event)
  288. {
  289. struct hist_field *operand1 = hist_field->operands[0];
  290. struct hist_field *operand2 = hist_field->operands[1];
  291. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  292. u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
  293. return val1 - val2;
  294. }
  295. static u64 hist_field_div(struct hist_field *hist_field,
  296. struct tracing_map_elt *elt,
  297. struct trace_buffer *buffer,
  298. struct ring_buffer_event *rbe,
  299. void *event)
  300. {
  301. struct hist_field *operand1 = hist_field->operands[0];
  302. struct hist_field *operand2 = hist_field->operands[1];
  303. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  304. u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
  305. /* Return -1 for the undefined case */
  306. if (!val2)
  307. return -1;
  308. /* Use shift if the divisor is a power of 2 */
  309. if (!(val2 & (val2 - 1)))
  310. return val1 >> __ffs64(val2);
  311. return div64_u64(val1, val2);
  312. }
  313. static u64 div_by_power_of_two(struct hist_field *hist_field,
  314. struct tracing_map_elt *elt,
  315. struct trace_buffer *buffer,
  316. struct ring_buffer_event *rbe,
  317. void *event)
  318. {
  319. struct hist_field *operand1 = hist_field->operands[0];
  320. struct hist_field *operand2 = hist_field->operands[1];
  321. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  322. return val1 >> __ffs64(operand2->constant);
  323. }
  324. static u64 div_by_not_power_of_two(struct hist_field *hist_field,
  325. struct tracing_map_elt *elt,
  326. struct trace_buffer *buffer,
  327. struct ring_buffer_event *rbe,
  328. void *event)
  329. {
  330. struct hist_field *operand1 = hist_field->operands[0];
  331. struct hist_field *operand2 = hist_field->operands[1];
  332. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  333. return div64_u64(val1, operand2->constant);
  334. }
  335. static u64 div_by_mult_and_shift(struct hist_field *hist_field,
  336. struct tracing_map_elt *elt,
  337. struct trace_buffer *buffer,
  338. struct ring_buffer_event *rbe,
  339. void *event)
  340. {
  341. struct hist_field *operand1 = hist_field->operands[0];
  342. struct hist_field *operand2 = hist_field->operands[1];
  343. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  344. /*
  345. * If the divisor is a constant, do a multiplication and shift instead.
  346. *
  347. * Choose Z = some power of 2. If Y <= Z, then:
  348. * X / Y = (X * (Z / Y)) / Z
  349. *
  350. * (Z / Y) is a constant (mult) which is calculated at parse time, so:
  351. * X / Y = (X * mult) / Z
  352. *
  353. * The division by Z can be replaced by a shift since Z is a power of 2:
  354. * X / Y = (X * mult) >> HIST_DIV_SHIFT
  355. *
  356. * As long, as X < Z the results will not be off by more than 1.
  357. */
  358. if (val1 < (1 << HIST_DIV_SHIFT)) {
  359. u64 mult = operand2->div_multiplier;
  360. return (val1 * mult + ((1 << HIST_DIV_SHIFT) - 1)) >> HIST_DIV_SHIFT;
  361. }
  362. return div64_u64(val1, operand2->constant);
  363. }
  364. static u64 hist_field_mult(struct hist_field *hist_field,
  365. struct tracing_map_elt *elt,
  366. struct trace_buffer *buffer,
  367. struct ring_buffer_event *rbe,
  368. void *event)
  369. {
  370. struct hist_field *operand1 = hist_field->operands[0];
  371. struct hist_field *operand2 = hist_field->operands[1];
  372. u64 val1 = hist_fn_call(operand1, elt, buffer, rbe, event);
  373. u64 val2 = hist_fn_call(operand2, elt, buffer, rbe, event);
  374. return val1 * val2;
  375. }
  376. static u64 hist_field_unary_minus(struct hist_field *hist_field,
  377. struct tracing_map_elt *elt,
  378. struct trace_buffer *buffer,
  379. struct ring_buffer_event *rbe,
  380. void *event)
  381. {
  382. struct hist_field *operand = hist_field->operands[0];
  383. s64 sval = (s64)hist_fn_call(operand, elt, buffer, rbe, event);
  384. u64 val = (u64)-sval;
  385. return val;
  386. }
/*
 * Generate hist_field_<type>() accessors, one per fixed-width field
 * type, each loading the value from the event record at the field's
 * offset and widening it to u64.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
	static u64 hist_field_##type(struct hist_field *hist_field,	\
				     struct tracing_map_elt *elt,	\
				     struct trace_buffer *buffer,	\
				     struct ring_buffer_event *rbe,	\
				     void *event)			\
{									\
	type *addr = (type *)(event + hist_field->field->offset);	\
									\
	return (u64)(unsigned long)*addr;				\
}

DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
/*
 * Iteration helpers over hist_data->fields[]: values occupy indices
 * [0, n_vals) and keys occupy [n_vals, n_fields).
 */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)

/* The hitcount value always lives at index 0 of fields[] */
#define HITCOUNT_IDX 0

#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/* Role and modifier bits for hist_field->flags */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
	HIST_FIELD_FL_LOG2		= 1 << 9,
	HIST_FIELD_FL_TIMESTAMP		= 1 << 10,
	HIST_FIELD_FL_TIMESTAMP_USECS	= 1 << 11,
	HIST_FIELD_FL_VAR		= 1 << 12,
	HIST_FIELD_FL_EXPR		= 1 << 13,
	HIST_FIELD_FL_VAR_REF		= 1 << 14,
	HIST_FIELD_FL_CPU		= 1 << 15,
	HIST_FIELD_FL_ALIAS		= 1 << 16,
	HIST_FIELD_FL_BUCKET		= 1 << 17,
	HIST_FIELD_FL_CONST		= 1 << 18,
	HIST_FIELD_FL_PERCENT		= 1 << 19,
	HIST_FIELD_FL_GRAPH		= 1 << 20,
};
/* Variable name/expression pairs parsed from the trigger command */
struct var_defs {
	unsigned int	n_vars;
	char		*name[TRACING_MAP_VARS_MAX];
	char		*expr[TRACING_MAP_VARS_MAX];
};
/*
 * Attributes parsed from the hist trigger command string.  The *_str
 * members hold raw substrings of the command; presumably they are
 * parsed into their final form elsewhere in this file.
 */
struct hist_trigger_attrs {
	char		*keys_str;
	char		*vals_str;
	char		*sort_key_str;
	char		*name;
	char		*clock;
	bool		pause;
	bool		cont;
	bool		clear;
	bool		ts_in_usecs;	/* report common_timestamp in usecs */
	bool		no_hitcount;
	unsigned int	map_bits;	/* log2 of the tracing_map size */

	char		*assignment_str[TRACING_MAP_VARS_MAX];
	unsigned int	n_assignments;

	char		*action_str[HIST_ACTIONS_MAX];
	unsigned int	n_actions;

	struct var_defs	var_defs;
};
/* Pairs a variable hist_field with the value field it captures */
struct field_var {
	struct hist_field	*var;
	struct hist_field	*val;
};

/* Associates an auxiliary hist trigger with the command that created it */
struct field_var_hist {
	struct hist_trigger_data	*hist_data;
	char				*cmd;
};
/*
 * Everything describing one histogram trigger: its fields (values
 * first, then keys), parsed attributes, the backing tracing_map, and
 * any variables, variable references and actions it defines.
 */
struct hist_trigger_data {
	struct hist_field               *fields[HIST_FIELDS_MAX];
	unsigned int			n_vals;
	unsigned int			n_keys;
	unsigned int			n_fields;
	unsigned int			n_vars;
	unsigned int			n_var_str;
	unsigned int			key_size;
	struct tracing_map_sort_key	sort_keys[TRACING_MAP_SORT_KEYS_MAX];
	unsigned int			n_sort_keys;
	struct trace_event_file		*event_file;
	struct hist_trigger_attrs	*attrs;
	struct tracing_map		*map;
	bool				enable_timestamps;
	bool				remove;
	struct hist_field               *var_refs[TRACING_MAP_VARS_MAX];
	unsigned int			n_var_refs;

	struct action_data		*actions[HIST_ACTIONS_MAX];
	unsigned int			n_actions;

	struct field_var		*field_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_field_vars;
	unsigned int			n_field_var_str;
	struct field_var_hist		*field_var_hists[SYNTH_FIELDS_MAX];
	unsigned int			n_field_var_hists;

	struct field_var		*save_vars[SYNTH_FIELDS_MAX];
	unsigned int			n_save_vars;
	unsigned int			n_save_var_str;
};
struct action_data;

/* Callback invoked when an action fires on a trigger hit */
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
			     struct tracing_map_elt *elt,
			     struct trace_buffer *buffer, void *rec,
			     struct ring_buffer_event *rbe, void *key,
			     struct action_data *data, u64 *var_ref_vals);

/* Comparison used by onmax()/onchange() to decide whether to act */
typedef bool (*check_track_val_fn_t) (u64 track_val, u64 var_val);

/* Which handler clause introduced the action */
enum handler_id {
	HANDLER_ONMATCH = 1,
	HANDLER_ONMAX,
	HANDLER_ONCHANGE,
};

/* What the action does when its handler fires */
enum action_id {
	ACTION_SAVE = 1,
	ACTION_TRACE,
	ACTION_SNAPSHOT,
};
/*
 * One parsed action clause of a hist trigger, e.g.
 * onmax($lat).save(...) or onmatch(...).trace(...).  handler selects
 * which union member is valid: match_data for HANDLER_ONMATCH,
 * track_data otherwise.
 */
struct action_data {
	enum handler_id		handler;
	enum action_id		action;
	char			*action_name;
	action_fn_t		fn;

	unsigned int		n_params;
	char			*params[SYNTH_FIELDS_MAX];

	/*
	 * When a histogram trigger is hit, the values of any
	 * references to variables, including variables being passed
	 * as parameters to synthetic events, are collected into a
	 * var_ref_vals array.  This var_ref_idx array is an array of
	 * indices into the var_ref_vals array, one for each synthetic
	 * event param, and is passed to the synthetic event
	 * invocation.
	 */
	unsigned int		var_ref_idx[SYNTH_FIELDS_MAX];
	struct synth_event	*synth_event;
	bool			use_trace_keyword;
	char			*synth_event_name;

	union {
		struct {
			char			*event;
			char			*event_system;
		} match_data;

		struct {
			/*
			 * var_str contains the $-unstripped variable
			 * name referenced by var_ref, and used when
			 * printing the action.  Because var_ref
			 * creation is deferred to create_actions(),
			 * we need a per-action way to save it until
			 * then, thus var_str.
			 */
			char			*var_str;

			/*
			 * var_ref refers to the variable being
			 * tracked e.g onmax($var).
			 */
			struct hist_field	*var_ref;

			/*
			 * track_var contains the 'invisible' tracking
			 * variable created to keep the current
			 * e.g. max value.
			 */
			struct hist_field	*track_var;

			check_track_val_fn_t	check_val;
			action_fn_t		save_data;
		} track_data;
	};
};
/*
 * Snapshot of a tracked value (e.g. the running max) together with
 * the key and map element it was recorded for.
 */
struct track_data {
	u64				track_val;
	bool				updated;

	unsigned int			key_len;
	void				*key;
	struct tracing_map_elt		elt;

	struct action_data		*action_data;
	struct hist_trigger_data	*hist_data;
};

/* Per-map-element private data: comm and variable/string storage */
struct hist_elt_data {
	char *comm;
	u64 *var_ref_vals;
	char **field_var_str;
	int n_field_var_str;
};

/* Context handed to the snapshot action */
struct snapshot_context {
	struct tracing_map_elt *elt;
	void	*key;
};
  583. /*
  584. * Returns the specific division function to use if the divisor
  585. * is constant. This avoids extra branches when the trigger is hit.
  586. */
  587. static enum hist_field_fn hist_field_get_div_fn(struct hist_field *divisor)
  588. {
  589. u64 div = divisor->constant;
  590. if (!(div & (div - 1)))
  591. return HIST_FIELD_FN_DIV_POWER2;
  592. /* If the divisor is too large, do a regular division */
  593. if (div > (1 << HIST_DIV_SHIFT))
  594. return HIST_FIELD_FN_DIV_NOT_POWER2;
  595. divisor->div_multiplier = div64_u64((u64)(1 << HIST_DIV_SHIFT), div);
  596. return HIST_FIELD_FN_DIV_MULT_SHIFT;
  597. }
  598. static void track_data_free(struct track_data *track_data)
  599. {
  600. struct hist_elt_data *elt_data;
  601. if (!track_data)
  602. return;
  603. kfree(track_data->key);
  604. elt_data = track_data->elt.private_data;
  605. if (elt_data) {
  606. kfree(elt_data->comm);
  607. kfree(elt_data);
  608. }
  609. kfree(track_data);
  610. }
  611. static struct track_data *track_data_alloc(unsigned int key_len,
  612. struct action_data *action_data,
  613. struct hist_trigger_data *hist_data)
  614. {
  615. struct track_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
  616. struct hist_elt_data *elt_data;
  617. if (!data)
  618. return ERR_PTR(-ENOMEM);
  619. data->key = kzalloc(key_len, GFP_KERNEL);
  620. if (!data->key) {
  621. track_data_free(data);
  622. return ERR_PTR(-ENOMEM);
  623. }
  624. data->key_len = key_len;
  625. data->action_data = action_data;
  626. data->hist_data = hist_data;
  627. elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
  628. if (!elt_data) {
  629. track_data_free(data);
  630. return ERR_PTR(-ENOMEM);
  631. }
  632. data->elt.private_data = elt_data;
  633. elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
  634. if (!elt_data->comm) {
  635. track_data_free(data);
  636. return ERR_PTR(-ENOMEM);
  637. }
  638. return data;
  639. }
#define HIST_PREFIX "hist:"

/* Copy of the most recent hist command, kept for error reporting */
static char *last_cmd;
/* "hist:system:event" location string for the most recent command */
static char last_cmd_loc[MAX_FILTER_STR_VAL];
  643. static int errpos(char *str)
  644. {
  645. if (!str || !last_cmd)
  646. return 0;
  647. return err_pos(last_cmd, str);
  648. }
  649. static void last_cmd_set(struct trace_event_file *file, char *str)
  650. {
  651. const char *system = NULL, *name = NULL;
  652. struct trace_event_call *call;
  653. if (!str)
  654. return;
  655. kfree(last_cmd);
  656. last_cmd = kasprintf(GFP_KERNEL, HIST_PREFIX "%s", str);
  657. if (!last_cmd)
  658. return;
  659. if (file) {
  660. call = file->event_call;
  661. system = call->class->system;
  662. if (system) {
  663. name = trace_event_name(call);
  664. if (!name)
  665. system = NULL;
  666. }
  667. }
  668. if (system)
  669. snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, HIST_PREFIX "%s:%s", system, name);
  670. }
  671. static void hist_err(struct trace_array *tr, u8 err_type, u16 err_pos)
  672. {
  673. if (!last_cmd)
  674. return;
  675. tracing_log_err(tr, last_cmd_loc, last_cmd, err_text,
  676. err_type, err_pos);
  677. }
  678. static void hist_err_clear(void)
  679. {
  680. if (last_cmd)
  681. last_cmd[0] = '\0';
  682. last_cmd_loc[0] = '\0';
  683. }
/* Signature of the probes registered on a synthetic event's tracepoint */
typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
				    unsigned int *var_ref_idx);

/*
 * Fire a synthetic event: if its tracepoint is enabled, call every
 * registered probe with the collected variable values.  Mirrors the
 * expansion of a static tracepoint call site.
 */
static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
			       unsigned int *var_ref_idx)
{
	struct tracepoint *tp = event->tp;

	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		synth_probe_func_t probe_func;
		void *__data;

		if (!(cpu_online(raw_smp_processor_id())))
			return;

		/* Walk the NULL-terminated probe list under RCU-sched */
		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				__data = probe_func_ptr->data;
				probe_func(__data, var_ref_vals, var_ref_idx);
			} while ((++probe_func_ptr)->func);
		}
	}
}
  706. static void action_trace(struct hist_trigger_data *hist_data,
  707. struct tracing_map_elt *elt,
  708. struct trace_buffer *buffer, void *rec,
  709. struct ring_buffer_event *rbe, void *key,
  710. struct action_data *data, u64 *var_ref_vals)
  711. {
  712. struct synth_event *event = data->synth_event;
  713. trace_synth(event, var_ref_vals, data->var_ref_idx);
  714. }
/* List node linking a variable-defining trigger into tr->hist_vars */
struct hist_var_data {
	struct list_head list;
	struct hist_trigger_data *hist_data;
};
  719. static u64 hist_field_timestamp(struct hist_field *hist_field,
  720. struct tracing_map_elt *elt,
  721. struct trace_buffer *buffer,
  722. struct ring_buffer_event *rbe,
  723. void *event)
  724. {
  725. struct hist_trigger_data *hist_data = hist_field->hist_data;
  726. struct trace_array *tr = hist_data->event_file->tr;
  727. u64 ts = ring_buffer_event_time_stamp(buffer, rbe);
  728. if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
  729. ts = ns2usecs(ts);
  730. return ts;
  731. }
  732. static u64 hist_field_cpu(struct hist_field *hist_field,
  733. struct tracing_map_elt *elt,
  734. struct trace_buffer *buffer,
  735. struct ring_buffer_event *rbe,
  736. void *event)
  737. {
  738. int cpu = smp_processor_id();
  739. return cpu;
  740. }
  741. /**
  742. * check_field_for_var_ref - Check if a VAR_REF field references a variable
  743. * @hist_field: The VAR_REF field to check
  744. * @var_data: The hist trigger that owns the variable
  745. * @var_idx: The trigger variable identifier
  746. *
  747. * Check the given VAR_REF field to see whether or not it references
  748. * the given variable associated with the given trigger.
  749. *
  750. * Return: The VAR_REF field if it does reference the variable, NULL if not
  751. */
  752. static struct hist_field *
  753. check_field_for_var_ref(struct hist_field *hist_field,
  754. struct hist_trigger_data *var_data,
  755. unsigned int var_idx)
  756. {
  757. WARN_ON(!(hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF));
  758. if (hist_field && hist_field->var.idx == var_idx &&
  759. hist_field->var.hist_data == var_data)
  760. return hist_field;
  761. return NULL;
  762. }
  763. /**
  764. * find_var_ref - Check if a trigger has a reference to a trigger variable
  765. * @hist_data: The hist trigger that might have a reference to the variable
  766. * @var_data: The hist trigger that owns the variable
  767. * @var_idx: The trigger variable identifier
  768. *
  769. * Check the list of var_refs[] on the first hist trigger to see
  770. * whether any of them are references to the variable on the second
  771. * trigger.
  772. *
  773. * Return: The VAR_REF field referencing the variable if so, NULL if not
  774. */
  775. static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
  776. struct hist_trigger_data *var_data,
  777. unsigned int var_idx)
  778. {
  779. struct hist_field *hist_field;
  780. unsigned int i;
  781. for (i = 0; i < hist_data->n_var_refs; i++) {
  782. hist_field = hist_data->var_refs[i];
  783. if (check_field_for_var_ref(hist_field, var_data, var_idx))
  784. return hist_field;
  785. }
  786. return NULL;
  787. }
  788. /**
  789. * find_any_var_ref - Check if there is a reference to a given trigger variable
  790. * @hist_data: The hist trigger
  791. * @var_idx: The trigger variable identifier
  792. *
  793. * Check to see whether the given variable is currently referenced by
  794. * any other trigger.
  795. *
  796. * The trigger the variable is defined on is explicitly excluded - the
  797. * assumption being that a self-reference doesn't prevent a trigger
  798. * from being removed.
  799. *
  800. * Return: The VAR_REF field referencing the variable if so, NULL if not
  801. */
  802. static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
  803. unsigned int var_idx)
  804. {
  805. struct trace_array *tr = hist_data->event_file->tr;
  806. struct hist_field *found = NULL;
  807. struct hist_var_data *var_data;
  808. list_for_each_entry(var_data, &tr->hist_vars, list) {
  809. if (var_data->hist_data == hist_data)
  810. continue;
  811. found = find_var_ref(var_data->hist_data, hist_data, var_idx);
  812. if (found)
  813. break;
  814. }
  815. return found;
  816. }
  817. /**
  818. * check_var_refs - Check if there is a reference to any of trigger's variables
  819. * @hist_data: The hist trigger
  820. *
  821. * A trigger can define one or more variables. If any one of them is
  822. * currently referenced by any other trigger, this function will
  823. * determine that.
  824. *
  825. * Typically used to determine whether or not a trigger can be removed
  826. * - if there are any references to a trigger's variables, it cannot.
  827. *
  828. * Return: True if there is a reference to any of trigger's variables
  829. */
  830. static bool check_var_refs(struct hist_trigger_data *hist_data)
  831. {
  832. struct hist_field *field;
  833. bool found = false;
  834. int i;
  835. for_each_hist_field(i, hist_data) {
  836. field = hist_data->fields[i];
  837. if (field && field->flags & HIST_FIELD_FL_VAR) {
  838. if (find_any_var_ref(hist_data, field->var.idx)) {
  839. found = true;
  840. break;
  841. }
  842. }
  843. }
  844. return found;
  845. }
  846. static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
  847. {
  848. struct trace_array *tr = hist_data->event_file->tr;
  849. struct hist_var_data *var_data, *found = NULL;
  850. list_for_each_entry(var_data, &tr->hist_vars, list) {
  851. if (var_data->hist_data == hist_data) {
  852. found = var_data;
  853. break;
  854. }
  855. }
  856. return found;
  857. }
  858. static bool field_has_hist_vars(struct hist_field *hist_field,
  859. unsigned int level)
  860. {
  861. int i;
  862. if (level > 3)
  863. return false;
  864. if (!hist_field)
  865. return false;
  866. if (hist_field->flags & HIST_FIELD_FL_VAR ||
  867. hist_field->flags & HIST_FIELD_FL_VAR_REF)
  868. return true;
  869. for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
  870. struct hist_field *operand;
  871. operand = hist_field->operands[i];
  872. if (field_has_hist_vars(operand, level + 1))
  873. return true;
  874. }
  875. return false;
  876. }
  877. static bool has_hist_vars(struct hist_trigger_data *hist_data)
  878. {
  879. struct hist_field *hist_field;
  880. int i;
  881. for_each_hist_field(i, hist_data) {
  882. hist_field = hist_data->fields[i];
  883. if (field_has_hist_vars(hist_field, 0))
  884. return true;
  885. }
  886. return false;
  887. }
  888. static int save_hist_vars(struct hist_trigger_data *hist_data)
  889. {
  890. struct trace_array *tr = hist_data->event_file->tr;
  891. struct hist_var_data *var_data;
  892. var_data = find_hist_vars(hist_data);
  893. if (var_data)
  894. return 0;
  895. if (tracing_check_open_get_tr(tr))
  896. return -ENODEV;
  897. var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
  898. if (!var_data) {
  899. trace_array_put(tr);
  900. return -ENOMEM;
  901. }
  902. var_data->hist_data = hist_data;
  903. list_add(&var_data->list, &tr->hist_vars);
  904. return 0;
  905. }
  906. static void remove_hist_vars(struct hist_trigger_data *hist_data)
  907. {
  908. struct trace_array *tr = hist_data->event_file->tr;
  909. struct hist_var_data *var_data;
  910. var_data = find_hist_vars(hist_data);
  911. if (!var_data)
  912. return;
  913. if (WARN_ON(check_var_refs(hist_data)))
  914. return;
  915. list_del(&var_data->list);
  916. kfree(var_data);
  917. trace_array_put(tr);
  918. }
  919. static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
  920. const char *var_name)
  921. {
  922. struct hist_field *hist_field, *found = NULL;
  923. int i;
  924. for_each_hist_field(i, hist_data) {
  925. hist_field = hist_data->fields[i];
  926. if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
  927. strcmp(hist_field->var.name, var_name) == 0) {
  928. found = hist_field;
  929. break;
  930. }
  931. }
  932. return found;
  933. }
  934. static struct hist_field *find_var(struct hist_trigger_data *hist_data,
  935. struct trace_event_file *file,
  936. const char *var_name)
  937. {
  938. struct hist_trigger_data *test_data;
  939. struct event_trigger_data *test;
  940. struct hist_field *hist_field;
  941. lockdep_assert_held(&event_mutex);
  942. hist_field = find_var_field(hist_data, var_name);
  943. if (hist_field)
  944. return hist_field;
  945. list_for_each_entry(test, &file->triggers, list) {
  946. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  947. test_data = test->private_data;
  948. hist_field = find_var_field(test_data, var_name);
  949. if (hist_field)
  950. return hist_field;
  951. }
  952. }
  953. return NULL;
  954. }
  955. static struct trace_event_file *find_var_file(struct trace_array *tr,
  956. char *system,
  957. char *event_name,
  958. char *var_name)
  959. {
  960. struct hist_trigger_data *var_hist_data;
  961. struct hist_var_data *var_data;
  962. struct trace_event_file *file, *found = NULL;
  963. if (system)
  964. return find_event_file(tr, system, event_name);
  965. list_for_each_entry(var_data, &tr->hist_vars, list) {
  966. var_hist_data = var_data->hist_data;
  967. file = var_hist_data->event_file;
  968. if (file == found)
  969. continue;
  970. if (find_var_field(var_hist_data, var_name)) {
  971. if (found) {
  972. hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name));
  973. return NULL;
  974. }
  975. found = file;
  976. }
  977. }
  978. return found;
  979. }
  980. static struct hist_field *find_file_var(struct trace_event_file *file,
  981. const char *var_name)
  982. {
  983. struct hist_trigger_data *test_data;
  984. struct event_trigger_data *test;
  985. struct hist_field *hist_field;
  986. lockdep_assert_held(&event_mutex);
  987. list_for_each_entry(test, &file->triggers, list) {
  988. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  989. test_data = test->private_data;
  990. hist_field = find_var_field(test_data, var_name);
  991. if (hist_field)
  992. return hist_field;
  993. }
  994. }
  995. return NULL;
  996. }
/*
 * Search this trigger's onmatch() actions for a variable named
 * @var_name defined on one of the matched events.  Used to resolve a
 * variable reference that omits the system/event qualifier.
 *
 * Returns the variable's hist_field, NULL if no action resolves it,
 * or ERR_PTR(-EINVAL) if more than one does (ambiguous reference).
 */
static struct hist_field *
find_match_var(struct hist_trigger_data *hist_data, char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field, *found = NULL;
	struct trace_event_file *file;
	unsigned int i;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];

		if (data->handler == HANDLER_ONMATCH) {
			char *system = data->match_data.event_system;
			char *event_name = data->match_data.event;

			file = find_var_file(tr, system, event_name, var_name);
			if (!file)
				continue;

			hist_field = find_file_var(file, var_name);
			if (hist_field) {
				if (found) {
					/* resolved via two actions: ambiguous */
					hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE,
						 errpos(var_name));
					return ERR_PTR(-EINVAL);
				}

				found = hist_field;
			}
		}
	}

	return found;
}
/*
 * Resolve variable @var_name to the hist_field that defines it.
 *
 * When system/event are unspecified, first try the events matched by
 * this trigger's onmatch() actions; note that an ambiguity ERR_PTR
 * from find_match_var() is deliberately reported to the caller as
 * NULL (the error has already been logged).  Otherwise, or if that
 * fails, fall back to a lookup across tr->hist_vars.
 */
static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
					 char *system,
					 char *event_name,
					 char *var_name)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	struct trace_event_file *file;

	if (!system || !event_name) {
		hist_field = find_match_var(hist_data, var_name);
		if (IS_ERR(hist_field))
			return NULL;
		if (hist_field)
			return hist_field;
	}

	file = find_var_file(tr, system, event_name, var_name);
	if (!file)
		return NULL;

	hist_field = find_file_var(file, var_name);

	return hist_field;
}
  1046. static u64 hist_field_var_ref(struct hist_field *hist_field,
  1047. struct tracing_map_elt *elt,
  1048. struct trace_buffer *buffer,
  1049. struct ring_buffer_event *rbe,
  1050. void *event)
  1051. {
  1052. struct hist_elt_data *elt_data;
  1053. u64 var_val = 0;
  1054. if (WARN_ON_ONCE(!elt))
  1055. return var_val;
  1056. elt_data = elt->private_data;
  1057. var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
  1058. return var_val;
  1059. }
/*
 * Fill @var_ref_vals with the current value of each variable this
 * trigger references, looked up by @key in the defining trigger's map.
 *
 * @self selects which references are resolved on this pass: true
 * resolves only references to this trigger's own variables, false only
 * references to other triggers' variables.  A "read once" reference
 * consumes the value (cleared after reading) unless resolving
 * self-references.
 *
 * Returns true only if every selected reference resolved to a value
 * that had actually been set.
 */
static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
			     u64 *var_ref_vals, bool self)
{
	struct hist_trigger_data *var_data;
	struct tracing_map_elt *var_elt;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	bool resolved = true;
	u64 var_val = 0;

	for (i = 0; i < hist_data->n_var_refs; i++) {
		hist_field = hist_data->var_refs[i];
		var_idx = hist_field->var.idx;
		var_data = hist_field->var.hist_data;

		/* reference not (yet) attached to a defining trigger */
		if (var_data == NULL) {
			resolved = false;
			break;
		}

		/* skip references not selected by @self on this pass */
		if ((self && var_data != hist_data) ||
		    (!self && var_data == hist_data))
			continue;

		var_elt = tracing_map_lookup(var_data->map, key);
		if (!var_elt) {
			resolved = false;
			break;
		}

		if (!tracing_map_var_set(var_elt, var_idx)) {
			resolved = false;
			break;
		}

		if (self || !hist_field->read_once)
			var_val = tracing_map_read_var(var_elt, var_idx);
		else
			var_val = tracing_map_read_var_once(var_elt, var_idx);

		var_ref_vals[i] = var_val;
	}

	return resolved;
}
  1097. static const char *hist_field_name(struct hist_field *field,
  1098. unsigned int level)
  1099. {
  1100. const char *field_name = "";
  1101. if (WARN_ON_ONCE(!field))
  1102. return field_name;
  1103. if (level > 1)
  1104. return field_name;
  1105. if (field->field)
  1106. field_name = field->field->name;
  1107. else if (field->flags & HIST_FIELD_FL_LOG2 ||
  1108. field->flags & HIST_FIELD_FL_ALIAS ||
  1109. field->flags & HIST_FIELD_FL_BUCKET)
  1110. field_name = hist_field_name(field->operands[0], ++level);
  1111. else if (field->flags & HIST_FIELD_FL_CPU)
  1112. field_name = "common_cpu";
  1113. else if (field->flags & HIST_FIELD_FL_EXPR ||
  1114. field->flags & HIST_FIELD_FL_VAR_REF) {
  1115. if (field->system) {
  1116. static char full_name[MAX_FILTER_STR_VAL];
  1117. strcat(full_name, field->system);
  1118. strcat(full_name, ".");
  1119. strcat(full_name, field->event_name);
  1120. strcat(full_name, ".");
  1121. strcat(full_name, field->name);
  1122. field_name = full_name;
  1123. } else
  1124. field_name = field->name;
  1125. } else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
  1126. field_name = "common_timestamp";
  1127. else if (field->flags & HIST_FIELD_FL_STACKTRACE) {
  1128. if (field->field)
  1129. field_name = field->field->name;
  1130. else
  1131. field_name = "common_stacktrace";
  1132. } else if (field->flags & HIST_FIELD_FL_HITCOUNT)
  1133. field_name = "hitcount";
  1134. if (field_name == NULL)
  1135. field_name = "";
  1136. return field_name;
  1137. }
  1138. static enum hist_field_fn select_value_fn(int field_size, int field_is_signed)
  1139. {
  1140. switch (field_size) {
  1141. case 8:
  1142. if (field_is_signed)
  1143. return HIST_FIELD_FN_S64;
  1144. else
  1145. return HIST_FIELD_FN_U64;
  1146. case 4:
  1147. if (field_is_signed)
  1148. return HIST_FIELD_FN_S32;
  1149. else
  1150. return HIST_FIELD_FN_U32;
  1151. case 2:
  1152. if (field_is_signed)
  1153. return HIST_FIELD_FN_S16;
  1154. else
  1155. return HIST_FIELD_FN_U16;
  1156. case 1:
  1157. if (field_is_signed)
  1158. return HIST_FIELD_FN_S8;
  1159. else
  1160. return HIST_FIELD_FN_U8;
  1161. }
  1162. return HIST_FIELD_FN_NOP;
  1163. }
  1164. static int parse_map_size(char *str)
  1165. {
  1166. unsigned long size, map_bits;
  1167. int ret;
  1168. ret = kstrtoul(str, 0, &size);
  1169. if (ret)
  1170. goto out;
  1171. map_bits = ilog2(roundup_pow_of_two(size));
  1172. if (map_bits < TRACING_MAP_BITS_MIN ||
  1173. map_bits > TRACING_MAP_BITS_MAX)
  1174. ret = -EINVAL;
  1175. else
  1176. ret = map_bits;
  1177. out:
  1178. return ret;
  1179. }
  1180. static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
  1181. {
  1182. unsigned int i;
  1183. if (!attrs)
  1184. return;
  1185. for (i = 0; i < attrs->n_assignments; i++)
  1186. kfree(attrs->assignment_str[i]);
  1187. for (i = 0; i < attrs->n_actions; i++)
  1188. kfree(attrs->action_str[i]);
  1189. kfree(attrs->name);
  1190. kfree(attrs->sort_key_str);
  1191. kfree(attrs->keys_str);
  1192. kfree(attrs->vals_str);
  1193. kfree(attrs->clock);
  1194. kfree(attrs);
  1195. }
  1196. static int parse_action(char *str, struct hist_trigger_attrs *attrs)
  1197. {
  1198. int ret = -EINVAL;
  1199. if (attrs->n_actions >= HIST_ACTIONS_MAX)
  1200. return ret;
  1201. if ((str_has_prefix(str, "onmatch(")) ||
  1202. (str_has_prefix(str, "onmax(")) ||
  1203. (str_has_prefix(str, "onchange("))) {
  1204. attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
  1205. if (!attrs->action_str[attrs->n_actions]) {
  1206. ret = -ENOMEM;
  1207. return ret;
  1208. }
  1209. attrs->n_actions++;
  1210. ret = 0;
  1211. }
  1212. return ret;
  1213. }
/*
 * Parse one "name=value" token of a hist trigger string into @attrs.
 * Recognizes the built-in key/val/sort/name/clock/size assignments;
 * any other assignment is saved verbatim as a variable definition to
 * be parsed later.  Returns 0 on success or a negative error.
 */
static int parse_assignment(struct trace_array *tr,
			    char *str, struct hist_trigger_attrs *attrs)
{
	int len, ret = 0;

	if ((len = str_has_prefix(str, "key=")) ||
	    (len = str_has_prefix(str, "keys="))) {
		attrs->keys_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->keys_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "val=")) ||
		   (len = str_has_prefix(str, "vals=")) ||
		   (len = str_has_prefix(str, "values="))) {
		attrs->vals_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->vals_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "sort="))) {
		attrs->sort_key_str = kstrdup(str + len, GFP_KERNEL);
		if (!attrs->sort_key_str) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (str_has_prefix(str, "name=")) {
		/* keep the whole "name=..." string, not just the value */
		attrs->name = kstrdup(str, GFP_KERNEL);
		if (!attrs->name) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "clock="))) {
		str += len;
		str = strstrip(str);
		attrs->clock = kstrdup(str, GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto out;
		}
	} else if ((len = str_has_prefix(str, "size="))) {
		int map_bits = parse_map_size(str + len);

		if (map_bits < 0) {
			ret = map_bits;
			goto out;
		}
		attrs->map_bits = map_bits;
	} else {
		char *assignment;

		/* anything else is a variable definition, saved for later */
		if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str));
			ret = -EINVAL;
			goto out;
		}

		assignment = kstrdup(str, GFP_KERNEL);
		if (!assignment) {
			ret = -ENOMEM;
			goto out;
		}

		attrs->assignment_str[attrs->n_assignments++] = assignment;
	}
 out:
	return ret;
}
/*
 * Split @trigger_str on ':' and parse each token into a newly
 * allocated hist_trigger_attrs.  A "keys=" assignment is mandatory;
 * the clock defaults to "global" if none was given.  Returns the
 * attrs or an ERR_PTR; on error everything parsed so far is freed.
 */
static struct hist_trigger_attrs *
parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str)
{
	struct hist_trigger_attrs *attrs;
	int ret = 0;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return ERR_PTR(-ENOMEM);

	while (trigger_str) {
		char *str = strsep(&trigger_str, ":");
		char *rhs;

		rhs = strchr(str, '=');
		if (rhs) {
			/* reject assignments with an empty right-hand side */
			if (!strlen(++rhs)) {
				ret = -EINVAL;
				hist_err(tr, HIST_ERR_EMPTY_ASSIGNMENT, errpos(str));
				goto free;
			}
			ret = parse_assignment(tr, str, attrs);
			if (ret)
				goto free;
		} else if (strcmp(str, "nohitcount") == 0 ||
			   strcmp(str, "NOHC") == 0)
			attrs->no_hitcount = true;
		else if (strcmp(str, "pause") == 0)
			attrs->pause = true;
		else if ((strcmp(str, "cont") == 0) ||
			 (strcmp(str, "continue") == 0))
			attrs->cont = true;
		else if (strcmp(str, "clear") == 0)
			attrs->clear = true;
		else {
			/* not a flag: must be an action token */
			ret = parse_action(str, attrs);
			if (ret)
				goto free;
		}
	}

	if (!attrs->keys_str) {
		ret = -EINVAL;
		goto free;
	}

	if (!attrs->clock) {
		attrs->clock = kstrdup("global", GFP_KERNEL);
		if (!attrs->clock) {
			ret = -ENOMEM;
			goto free;
		}
	}

	return attrs;
 free:
	destroy_hist_trigger_attrs(attrs);

	return ERR_PTR(ret);
}
/*
 * Copy @task's comm into @comm (a TASK_COMM_LEN buffer) for use by
 * the .execname modifier.  PID 0 is the idle task, which has no
 * meaningful comm; a negative PID should never happen.
 */
static inline void save_comm(char *comm, struct task_struct *task)
{
	if (!task->pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(task->pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	/*
	 * NOTE(review): strncpy() does not NUL-terminate when the source
	 * fills the buffer -- this relies on task->comm always being
	 * NUL-terminated within TASK_COMM_LEN; confirm before changing.
	 */
	strncpy(comm, task->comm, TASK_COMM_LEN);
}
  1342. static void hist_elt_data_free(struct hist_elt_data *elt_data)
  1343. {
  1344. unsigned int i;
  1345. for (i = 0; i < elt_data->n_field_var_str; i++)
  1346. kfree(elt_data->field_var_str[i]);
  1347. kfree(elt_data->field_var_str);
  1348. kfree(elt_data->comm);
  1349. kfree(elt_data);
  1350. }
  1351. static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
  1352. {
  1353. struct hist_elt_data *elt_data = elt->private_data;
  1354. hist_elt_data_free(elt_data);
  1355. }
  1356. static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
  1357. {
  1358. struct hist_trigger_data *hist_data = elt->map->private_data;
  1359. unsigned int size = TASK_COMM_LEN;
  1360. struct hist_elt_data *elt_data;
  1361. struct hist_field *hist_field;
  1362. unsigned int i, n_str;
  1363. elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
  1364. if (!elt_data)
  1365. return -ENOMEM;
  1366. for_each_hist_field(i, hist_data) {
  1367. hist_field = hist_data->fields[i];
  1368. if (hist_field->flags & HIST_FIELD_FL_EXECNAME) {
  1369. elt_data->comm = kzalloc(size, GFP_KERNEL);
  1370. if (!elt_data->comm) {
  1371. kfree(elt_data);
  1372. return -ENOMEM;
  1373. }
  1374. break;
  1375. }
  1376. }
  1377. n_str = hist_data->n_field_var_str + hist_data->n_save_var_str +
  1378. hist_data->n_var_str;
  1379. if (n_str > SYNTH_FIELDS_MAX) {
  1380. hist_elt_data_free(elt_data);
  1381. return -EINVAL;
  1382. }
  1383. BUILD_BUG_ON(STR_VAR_LEN_MAX & (sizeof(u64) - 1));
  1384. size = STR_VAR_LEN_MAX;
  1385. elt_data->field_var_str = kcalloc(n_str, sizeof(char *), GFP_KERNEL);
  1386. if (!elt_data->field_var_str) {
  1387. hist_elt_data_free(elt_data);
  1388. return -EINVAL;
  1389. }
  1390. elt_data->n_field_var_str = n_str;
  1391. for (i = 0; i < n_str; i++) {
  1392. elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
  1393. if (!elt_data->field_var_str[i]) {
  1394. hist_elt_data_free(elt_data);
  1395. return -ENOMEM;
  1396. }
  1397. }
  1398. elt->private_data = elt_data;
  1399. return 0;
  1400. }
  1401. static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
  1402. {
  1403. struct hist_elt_data *elt_data = elt->private_data;
  1404. if (elt_data->comm)
  1405. save_comm(elt_data->comm, current);
  1406. }
/* tracing_map callbacks managing per-element data (comm + string vars) */
static const struct tracing_map_ops hist_trigger_elt_data_ops = {
	.elt_alloc = hist_trigger_elt_data_alloc,
	.elt_free = hist_trigger_elt_data_free,
	.elt_init = hist_trigger_elt_data_init,
};
  1412. static const char *get_hist_field_flags(struct hist_field *hist_field)
  1413. {
  1414. const char *flags_str = NULL;
  1415. if (hist_field->flags & HIST_FIELD_FL_HEX)
  1416. flags_str = "hex";
  1417. else if (hist_field->flags & HIST_FIELD_FL_SYM)
  1418. flags_str = "sym";
  1419. else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
  1420. flags_str = "sym-offset";
  1421. else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
  1422. flags_str = "execname";
  1423. else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
  1424. flags_str = "syscall";
  1425. else if (hist_field->flags & HIST_FIELD_FL_LOG2)
  1426. flags_str = "log2";
  1427. else if (hist_field->flags & HIST_FIELD_FL_BUCKET)
  1428. flags_str = "buckets";
  1429. else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
  1430. flags_str = "usecs";
  1431. else if (hist_field->flags & HIST_FIELD_FL_PERCENT)
  1432. flags_str = "percent";
  1433. else if (hist_field->flags & HIST_FIELD_FL_GRAPH)
  1434. flags_str = "graph";
  1435. else if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
  1436. flags_str = "stacktrace";
  1437. return flags_str;
  1438. }
/*
 * Append the printable form of @field to @expr: a leading '$' for a
 * variable reference, the literal value for a constant, then the
 * field name and any ".modifier" suffix (suffix omitted for var refs).
 * @expr must be large enough (MAX_FILTER_STR_VAL, see expr_str()).
 */
static void expr_field_str(struct hist_field *field, char *expr)
{
	if (field->flags & HIST_FIELD_FL_VAR_REF)
		strcat(expr, "$");
	else if (field->flags & HIST_FIELD_FL_CONST) {
		char str[HIST_CONST_DIGITS_MAX];

		snprintf(str, HIST_CONST_DIGITS_MAX, "%llu", field->constant);
		strcat(expr, str);
	}

	strcat(expr, hist_field_name(field, 0));

	if (field->flags && !(field->flags & HIST_FIELD_FL_VAR_REF)) {
		const char *flags_str = get_hist_field_flags(field);

		if (flags_str) {
			strcat(expr, ".");
			strcat(expr, flags_str);
		}
	}
}
/*
 * Build a displayable string for expression @field in a freshly
 * allocated buffer (caller frees).  Handles at most two levels -- a
 * unary or binary operator whose operands are simple fields; returns
 * NULL on deeper nesting, an unknown operator, or allocation failure.
 */
static char *expr_str(struct hist_field *field, unsigned int level)
{
	char *expr;

	if (level > 1)
		return NULL;

	expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!expr)
		return NULL;

	if (!field->operands[0]) {
		/* no operands: a simple field, not an expression */
		expr_field_str(field, expr);
		return expr;
	}

	if (field->operator == FIELD_OP_UNARY_MINUS) {
		char *subexpr;

		strcat(expr, "-(");
		subexpr = expr_str(field->operands[0], ++level);
		if (!subexpr) {
			kfree(expr);
			return NULL;
		}
		strcat(expr, subexpr);
		strcat(expr, ")");

		kfree(subexpr);

		return expr;
	}

	expr_field_str(field->operands[0], expr);

	switch (field->operator) {
	case FIELD_OP_MINUS:
		strcat(expr, "-");
		break;
	case FIELD_OP_PLUS:
		strcat(expr, "+");
		break;
	case FIELD_OP_DIV:
		strcat(expr, "/");
		break;
	case FIELD_OP_MULT:
		strcat(expr, "*");
		break;
	default:
		kfree(expr);
		return NULL;
	}

	expr_field_str(field->operands[1], expr);

	return expr;
}
  1503. /*
  1504. * If field_op != FIELD_OP_NONE, *sep points to the root operator
  1505. * of the expression tree to be evaluated.
  1506. */
  1507. static int contains_operator(char *str, char **sep)
  1508. {
  1509. enum field_op_id field_op = FIELD_OP_NONE;
  1510. char *minus_op, *plus_op, *div_op, *mult_op;
  1511. /*
  1512. * Report the last occurrence of the operators first, so that the
  1513. * expression is evaluated left to right. This is important since
  1514. * subtraction and division are not associative.
  1515. *
  1516. * e.g
  1517. * 64/8/4/2 is 1, i.e 64/8/4/2 = ((64/8)/4)/2
  1518. * 14-7-5-2 is 0, i.e 14-7-5-2 = ((14-7)-5)-2
  1519. */
  1520. /*
  1521. * First, find lower precedence addition and subtraction
  1522. * since the expression will be evaluated recursively.
  1523. */
  1524. minus_op = strrchr(str, '-');
  1525. if (minus_op) {
  1526. /*
  1527. * Unary minus is not supported in sub-expressions. If
  1528. * present, it is always the next root operator.
  1529. */
  1530. if (minus_op == str) {
  1531. field_op = FIELD_OP_UNARY_MINUS;
  1532. goto out;
  1533. }
  1534. field_op = FIELD_OP_MINUS;
  1535. }
  1536. plus_op = strrchr(str, '+');
  1537. if (plus_op || minus_op) {
  1538. /*
  1539. * For operators of the same precedence use to rightmost as the
  1540. * root, so that the expression is evaluated left to right.
  1541. */
  1542. if (plus_op > minus_op)
  1543. field_op = FIELD_OP_PLUS;
  1544. goto out;
  1545. }
  1546. /*
  1547. * Multiplication and division have higher precedence than addition and
  1548. * subtraction.
  1549. */
  1550. div_op = strrchr(str, '/');
  1551. if (div_op)
  1552. field_op = FIELD_OP_DIV;
  1553. mult_op = strrchr(str, '*');
  1554. /*
  1555. * For operators of the same precedence use to rightmost as the
  1556. * root, so that the expression is evaluated left to right.
  1557. */
  1558. if (mult_op > div_op)
  1559. field_op = FIELD_OP_MULT;
  1560. out:
  1561. if (sep) {
  1562. switch (field_op) {
  1563. case FIELD_OP_UNARY_MINUS:
  1564. case FIELD_OP_MINUS:
  1565. *sep = minus_op;
  1566. break;
  1567. case FIELD_OP_PLUS:
  1568. *sep = plus_op;
  1569. break;
  1570. case FIELD_OP_DIV:
  1571. *sep = div_op;
  1572. break;
  1573. case FIELD_OP_MULT:
  1574. *sep = mult_op;
  1575. break;
  1576. case FIELD_OP_NONE:
  1577. default:
  1578. *sep = NULL;
  1579. break;
  1580. }
  1581. }
  1582. return field_op;
  1583. }
  1584. static void get_hist_field(struct hist_field *hist_field)
  1585. {
  1586. hist_field->ref++;
  1587. }
/*
 * Drop a reference on @hist_field and free it once no shared
 * references remain.
 *
 * NOTE(review): the test is '--ref > 1', not '> 0' -- the field is
 * freed when the count drops to its creation value of 1; confirm
 * against get_hist_field()/create_var_ref() before changing.
 */
static void __destroy_hist_field(struct hist_field *hist_field)
{
	if (--hist_field->ref > 1)
		return;

	kfree(hist_field->var.name);
	kfree(hist_field->name);

	/* Can likely be a const */
	kfree_const(hist_field->type);

	kfree(hist_field->system);
	kfree(hist_field->event_name);

	kfree(hist_field);
}
/*
 * Recursively destroy @hist_field and its operand sub-fields.
 * @level bounds the recursion depth.  Var refs are owned by the
 * trigger's var_refs[] array and are destroyed there instead (see
 * destroy_hist_fields()).
 */
static void destroy_hist_field(struct hist_field *hist_field,
			       unsigned int level)
{
	unsigned int i;

	if (level > 3)
		return;

	if (!hist_field)
		return;

	if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
		return; /* var refs will be destroyed separately */

	for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
		destroy_hist_field(hist_field->operands[i], level + 1);

	__destroy_hist_field(hist_field);
}
/*
 * Allocate and initialize a hist_field for @field with @flags, and
 * optionally record the name of the variable it defines (@var_name).
 * For compound kinds (EXPR/ALIAS) only the shell is created and the
 * caller fills in the operands.  Returns NULL on failure; function
 * fields cannot be histogrammed and are rejected up front.
 */
static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
					    struct ftrace_event_field *field,
					    unsigned long flags,
					    char *var_name)
{
	struct hist_field *hist_field;

	if (field && is_function_field(field))
		return NULL;

	hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
	if (!hist_field)
		return NULL;

	hist_field->ref = 1;

	hist_field->hist_data = hist_data;

	if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
		goto out; /* caller will populate */

	if (flags & HIST_FIELD_FL_VAR_REF) {
		hist_field->fn_num = HIST_FIELD_FN_VAR_REF;
		goto out;
	}

	if (flags & HIST_FIELD_FL_HITCOUNT) {
		hist_field->fn_num = HIST_FIELD_FN_COUNTER;
		hist_field->size = sizeof(u64);
		/* static string: not kstrdup'd, freed via kfree_const() */
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_CONST) {
		hist_field->fn_num = HIST_FIELD_FN_CONST;
		hist_field->size = sizeof(u64);
		hist_field->type = kstrdup("u64", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_STACKTRACE) {
		if (field)
			hist_field->fn_num = HIST_FIELD_FN_STACK;
		else
			hist_field->fn_num = HIST_FIELD_FN_NOP;
		hist_field->size = HIST_STACKTRACE_SIZE;
		hist_field->type = kstrdup_const("unsigned long[]", GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & (HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET)) {
		unsigned long fl = flags & ~(HIST_FIELD_FL_LOG2 | HIST_FIELD_FL_BUCKET);

		hist_field->fn_num = flags & HIST_FIELD_FL_LOG2 ? HIST_FIELD_FN_LOG2 :
			HIST_FIELD_FN_BUCKET;
		/* wrap the raw field as operand 0 */
		hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
		if (!hist_field->operands[0])
			goto free;
		hist_field->size = hist_field->operands[0]->size;
		hist_field->type = kstrdup_const(hist_field->operands[0]->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;
		goto out;
	}

	if (flags & HIST_FIELD_FL_TIMESTAMP) {
		hist_field->fn_num = HIST_FIELD_FN_TIMESTAMP;
		hist_field->size = sizeof(u64);
		hist_field->type = "u64";
		goto out;
	}

	if (flags & HIST_FIELD_FL_CPU) {
		hist_field->fn_num = HIST_FIELD_FN_CPU;
		hist_field->size = sizeof(int);
		hist_field->type = "unsigned int";
		goto out;
	}

	if (WARN_ON_ONCE(!field))
		goto out;

	/* Pointers to strings are just pointers and dangerous to dereference */
	if (is_string_field(field) &&
	    (field->filter_type != FILTER_PTR_STRING)) {
		flags |= HIST_FIELD_FL_STRING;

		hist_field->size = MAX_FILTER_STR_VAL;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		if (field->filter_type == FILTER_STATIC_STRING) {
			hist_field->fn_num = HIST_FIELD_FN_STRING;
			hist_field->size = field->size;
		} else if (field->filter_type == FILTER_DYN_STRING) {
			hist_field->fn_num = HIST_FIELD_FN_DYNSTRING;
		} else if (field->filter_type == FILTER_RDYN_STRING)
			hist_field->fn_num = HIST_FIELD_FN_RELDYNSTRING;
		else
			hist_field->fn_num = HIST_FIELD_FN_PSTRING;
	} else {
		hist_field->size = field->size;
		hist_field->is_signed = field->is_signed;
		hist_field->type = kstrdup_const(field->type, GFP_KERNEL);
		if (!hist_field->type)
			goto free;

		hist_field->fn_num = select_value_fn(field->size,
						     field->is_signed);
		if (hist_field->fn_num == HIST_FIELD_FN_NOP) {
			destroy_hist_field(hist_field, 0);
			return NULL;
		}
	}
 out:
	hist_field->field = field;
	hist_field->flags = flags;

	if (var_name) {
		hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
		if (!hist_field->var.name)
			goto free;
	}

	return hist_field;
 free:
	destroy_hist_field(hist_field, 0);
	return NULL;
}
/*
 * Free all of a trigger's fields, then its var refs -- the latter are
 * skipped by destroy_hist_field() and must be destroyed here.
 */
static void destroy_hist_fields(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < HIST_FIELDS_MAX; i++) {
		if (hist_data->fields[i]) {
			destroy_hist_field(hist_data->fields[i], 0);
			hist_data->fields[i] = NULL;
		}
	}

	for (i = 0; i < hist_data->n_var_refs; i++) {
		WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
		__destroy_hist_field(hist_data->var_refs[i]);
		hist_data->var_refs[i] = NULL;
	}
}
/*
 * Copy the identifying data of variable @var_field into the new
 * reference @ref_field: index, defining trigger, size, signedness and
 * timestamp flags, plus duplicates of the naming strings.  On
 * allocation failure every string duplicated so far is freed.
 * Returns 0 or -ENOMEM.
 */
static int init_var_ref(struct hist_field *ref_field,
			struct hist_field *var_field,
			char *system, char *event_name)
{
	int err = 0;

	ref_field->var.idx = var_field->var.idx;
	ref_field->var.hist_data = var_field->hist_data;
	ref_field->size = var_field->size;
	ref_field->is_signed = var_field->is_signed;
	ref_field->flags |= var_field->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	if (system) {
		ref_field->system = kstrdup(system, GFP_KERNEL);
		if (!ref_field->system)
			return -ENOMEM;
	}

	if (event_name) {
		ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
		if (!ref_field->event_name) {
			err = -ENOMEM;
			goto free;
		}
	}

	if (var_field->var.name) {
		ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	} else if (var_field->name) {
		/* variable has no var.name: fall back to the field name */
		ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
		if (!ref_field->name) {
			err = -ENOMEM;
			goto free;
		}
	}

	ref_field->type = kstrdup_const(var_field->type, GFP_KERNEL);
	if (!ref_field->type) {
		err = -ENOMEM;
		goto free;
	}
 out:
	return err;
 free:
	kfree(ref_field->system);
	ref_field->system = NULL;
	kfree(ref_field->event_name);
	ref_field->event_name = NULL;
	kfree(ref_field->name);
	ref_field->name = NULL;

	goto out;
}
  1795. static int find_var_ref_idx(struct hist_trigger_data *hist_data,
  1796. struct hist_field *var_field)
  1797. {
  1798. struct hist_field *ref_field;
  1799. int i;
  1800. for (i = 0; i < hist_data->n_var_refs; i++) {
  1801. ref_field = hist_data->var_refs[i];
  1802. if (ref_field->var.idx == var_field->var.idx &&
  1803. ref_field->var.hist_data == var_field->hist_data)
  1804. return i;
  1805. }
  1806. return -ENOENT;
  1807. }
  1808. /**
  1809. * create_var_ref - Create a variable reference and attach it to trigger
  1810. * @hist_data: The trigger that will be referencing the variable
  1811. * @var_field: The VAR field to create a reference to
  1812. * @system: The optional system string
  1813. * @event_name: The optional event_name string
  1814. *
  1815. * Given a variable hist_field, create a VAR_REF hist_field that
  1816. * represents a reference to it.
  1817. *
  1818. * This function also adds the reference to the trigger that
  1819. * now references the variable.
  1820. *
  1821. * Return: The VAR_REF field if successful, NULL if not
  1822. */
  1823. static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
  1824. struct hist_field *var_field,
  1825. char *system, char *event_name)
  1826. {
  1827. unsigned long flags = HIST_FIELD_FL_VAR_REF;
  1828. struct hist_field *ref_field;
  1829. int i;
  1830. /* Check if the variable already exists */
  1831. for (i = 0; i < hist_data->n_var_refs; i++) {
  1832. ref_field = hist_data->var_refs[i];
  1833. if (ref_field->var.idx == var_field->var.idx &&
  1834. ref_field->var.hist_data == var_field->hist_data) {
  1835. get_hist_field(ref_field);
  1836. return ref_field;
  1837. }
  1838. }
  1839. /* Sanity check to avoid out-of-bound write on 'hist_data->var_refs' */
  1840. if (hist_data->n_var_refs >= TRACING_MAP_VARS_MAX)
  1841. return NULL;
  1842. ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
  1843. if (ref_field) {
  1844. if (init_var_ref(ref_field, var_field, system, event_name)) {
  1845. destroy_hist_field(ref_field, 0);
  1846. return NULL;
  1847. }
  1848. hist_data->var_refs[hist_data->n_var_refs] = ref_field;
  1849. ref_field->var_ref_idx = hist_data->n_var_refs++;
  1850. }
  1851. return ref_field;
  1852. }
  1853. static bool is_var_ref(char *var_name)
  1854. {
  1855. if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
  1856. return false;
  1857. return true;
  1858. }
/*
 * If @var_name was defined by this trigger as a plain alias for an
 * event field -- no operators in the expression and not itself a var
 * reference -- return that expression (the field name), else NULL.
 */
static char *field_name_from_var(struct hist_trigger_data *hist_data,
				 char *var_name)
{
	char *name, *field;
	unsigned int i;

	for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
		name = hist_data->attrs->var_defs.name[i];

		if (strcmp(var_name, name) == 0) {
			field = hist_data->attrs->var_defs.expr[i];
			/* only simple field aliases qualify */
			if (contains_operator(field, NULL) || is_var_ref(field))
				continue;
			return field;
		}
	}

	return NULL;
}
/*
 * Check whether "$var_name" can be satisfied by a field of this
 * trigger's own event.  @system and @event_name must either both be
 * given (and match this event) or both be absent.  Returns the
 * underlying field name, or NULL if the reference isn't local.
 */
static char *local_field_var_ref(struct hist_trigger_data *hist_data,
				 char *system, char *event_name,
				 char *var_name)
{
	struct trace_event_call *call;

	if (system && event_name) {
		call = hist_data->event_file->event_call;

		if (strcmp(system, call->class->system) != 0)
			return NULL;

		if (strcmp(event_name, trace_event_name(call)) != 0)
			return NULL;
	}

	/* reject a system qualifier without an event, or vice versa */
	if (!!system != !!event_name)
		return NULL;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;	/* skip the leading '$' */

	return field_name_from_var(hist_data, var_name);
}
/*
 * Parse "$name" (with optional system/event qualifiers supplied by
 * the caller) and return a new VAR_REF hist field attached to
 * @hist_data, or NULL -- logging HIST_ERR_VAR_NOT_FOUND -- if the
 * variable can't be resolved or the reference can't be created.
 */
static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
					char *system, char *event_name,
					char *var_name)
{
	struct hist_field *var_field = NULL, *ref_field = NULL;
	struct trace_array *tr = hist_data->event_file->tr;

	if (!is_var_ref(var_name))
		return NULL;

	var_name++;	/* skip the leading '$' */

	var_field = find_event_var(hist_data, system, event_name, var_name);
	if (var_field)
		ref_field = create_var_ref(hist_data, var_field,
					   system, event_name);

	if (!ref_field)
		hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name));

	return ref_field;
}
/*
 * Parse "field[.modifier]" from a hist trigger string: set the
 * modifier bits in *flags (and *buckets for .buckets=N), and resolve
 * the field itself -- either one of the synthetic common_* fields
 * (reported via *flags with a NULL return) or a real event field.
 * Returns the event field, NULL for synthetic fields, or an ERR_PTR
 * on a bad modifier / unknown field.
 */
static struct ftrace_event_field *
parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
	    char *field_str, unsigned long *flags, unsigned long *buckets)
{
	struct ftrace_event_field *field = NULL;
	char *field_name, *modifier, *str;
	struct trace_array *tr = file->tr;

	modifier = str = kstrdup(field_str, GFP_KERNEL);
	if (!modifier)
		return ERR_PTR(-ENOMEM);

	field_name = strsep(&modifier, ".");
	if (modifier) {
		if (strcmp(modifier, "hex") == 0)
			*flags |= HIST_FIELD_FL_HEX;
		else if (strcmp(modifier, "sym") == 0)
			*flags |= HIST_FIELD_FL_SYM;
		/*
		 * 'sym-offset' occurrences in the trigger string are modified
		 * to 'symXoffset' to simplify arithmetic expression parsing.
		 */
		else if (strcmp(modifier, "symXoffset") == 0)
			*flags |= HIST_FIELD_FL_SYM_OFFSET;
		else if ((strcmp(modifier, "execname") == 0) &&
			 (strcmp(field_name, "common_pid") == 0))
			*flags |= HIST_FIELD_FL_EXECNAME;
		else if (strcmp(modifier, "syscall") == 0)
			*flags |= HIST_FIELD_FL_SYSCALL;
		else if (strcmp(modifier, "stacktrace") == 0)
			*flags |= HIST_FIELD_FL_STACKTRACE;
		else if (strcmp(modifier, "log2") == 0)
			*flags |= HIST_FIELD_FL_LOG2;
		else if (strcmp(modifier, "usecs") == 0)
			*flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
		else if (strncmp(modifier, "bucket", 6) == 0) {
			int ret;

			/* accept both "bucket=N" and "buckets=N" */
			modifier += 6;

			if (*modifier == 's')
				modifier++;
			if (*modifier != '=')
				goto error;
			modifier++;
			ret = kstrtoul(modifier, 0, buckets);
			if (ret || !(*buckets))
				goto error;
			*flags |= HIST_FIELD_FL_BUCKET;
		} else if (strncmp(modifier, "percent", 7) == 0) {
			/* display-only modifier: invalid on vars and keys */
			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
				goto error;
			*flags |= HIST_FIELD_FL_PERCENT;
		} else if (strncmp(modifier, "graph", 5) == 0) {
			/* display-only modifier: invalid on vars and keys */
			if (*flags & (HIST_FIELD_FL_VAR | HIST_FIELD_FL_KEY))
				goto error;
			*flags |= HIST_FIELD_FL_GRAPH;
		} else {
 error:
			hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier));
			field = ERR_PTR(-EINVAL);
			goto out;
		}
	}

	if (strcmp(field_name, "common_timestamp") == 0) {
		*flags |= HIST_FIELD_FL_TIMESTAMP;
		hist_data->enable_timestamps = true;
		if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
			hist_data->attrs->ts_in_usecs = true;
	} else if (strcmp(field_name, "common_stacktrace") == 0) {
		*flags |= HIST_FIELD_FL_STACKTRACE;
	} else if (strcmp(field_name, "common_cpu") == 0)
		*flags |= HIST_FIELD_FL_CPU;
	else if (strcmp(field_name, "hitcount") == 0)
		*flags |= HIST_FIELD_FL_HITCOUNT;
	else {
		field = trace_find_event_field(file->event_call, field_name);
		if (!field || !field->size) {
			/*
			 * For backward compatibility, if field_name
			 * was "cpu" or "stacktrace", then we treat this
			 * the same as common_cpu and common_stacktrace
			 * respectively. This also works for "CPU", and
			 * "STACKTRACE".
			 */
			if (field && field->filter_type == FILTER_CPU) {
				*flags |= HIST_FIELD_FL_CPU;
			} else if (field && field->filter_type == FILTER_STACKTRACE) {
				*flags |= HIST_FIELD_FL_STACKTRACE;
			} else {
				hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
					 errpos(field_name));
				field = ERR_PTR(-EINVAL);
				goto out;
			}
		}
	}
 out:
	kfree(str);

	return field;
}
  2008. static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
  2009. struct hist_field *var_ref,
  2010. char *var_name)
  2011. {
  2012. struct hist_field *alias = NULL;
  2013. unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
  2014. alias = create_hist_field(hist_data, NULL, flags, var_name);
  2015. if (!alias)
  2016. return NULL;
  2017. alias->fn_num = var_ref->fn_num;
  2018. alias->operands[0] = var_ref;
  2019. if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
  2020. destroy_hist_field(alias, 0);
  2021. return NULL;
  2022. }
  2023. alias->var_ref_idx = var_ref->var_ref_idx;
  2024. return alias;
  2025. }
  2026. static struct hist_field *parse_const(struct hist_trigger_data *hist_data,
  2027. char *str, char *var_name,
  2028. unsigned long *flags)
  2029. {
  2030. struct trace_array *tr = hist_data->event_file->tr;
  2031. struct hist_field *field = NULL;
  2032. u64 constant;
  2033. if (kstrtoull(str, 0, &constant)) {
  2034. hist_err(tr, HIST_ERR_EXPECT_NUMBER, errpos(str));
  2035. return NULL;
  2036. }
  2037. *flags |= HIST_FIELD_FL_CONST;
  2038. field = create_hist_field(hist_data, NULL, *flags, var_name);
  2039. if (!field)
  2040. return NULL;
  2041. field->constant = constant;
  2042. return field;
  2043. }
/*
 * Parse a leaf (non-operator) element of a hist trigger expression:
 * a numeric constant, a variable reference (possibly fully qualified
 * as "system.event.var"), or an event field with optional modifiers.
 *
 * Returns a hist_field or an ERR_PTR(); never NULL.
 */
static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file, char *str,
				     unsigned long *flags, char *var_name)
{
	char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
	struct ftrace_event_field *field = NULL;
	struct hist_field *hist_field = NULL;
	unsigned long buckets = 0;
	int ret = 0;

	/* A leading digit means a numeric constant, not a field/var name */
	if (isdigit(str[0])) {
		hist_field = parse_const(hist_data, str, var_name, flags);
		if (!hist_field) {
			ret = -EINVAL;
			goto out;
		}
		return hist_field;
	}

	/* Two dots mean a fully qualified "system.event.var" reference */
	s = strchr(str, '.');
	if (s) {
		s = strchr(++s, '.');
		if (s) {
			ref_system = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_event = strsep(&str, ".");
			if (!str) {
				ret = -EINVAL;
				goto out;
			}
			ref_var = str;
		}
	}

	/*
	 * A local field of the same name takes precedence over a
	 * variable reference; otherwise try to resolve it as a var ref.
	 */
	s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
	if (!s) {
		hist_field = parse_var_ref(hist_data, ref_system,
					   ref_event, ref_var);
		if (hist_field) {
			if (var_name) {
				/* "var=other_var" creates an alias variable */
				hist_field = create_alias(hist_data, hist_field, var_name);
				if (!hist_field) {
					ret = -ENOMEM;
					goto out;
				}
			}
			return hist_field;
		}
	} else
		str = s;

	/* Not a constant or var ref: must be an event field */
	field = parse_field(hist_data, file, str, flags, &buckets);
	if (IS_ERR(field)) {
		ret = PTR_ERR(field);
		goto out;
	}

	hist_field = create_hist_field(hist_data, field, *flags, var_name);
	if (!hist_field) {
		ret = -ENOMEM;
		goto out;
	}
	hist_field->buckets = buckets;

	return hist_field;
 out:
	return ERR_PTR(ret);
}
  2109. static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
  2110. struct trace_event_file *file,
  2111. char *str, unsigned long flags,
  2112. char *var_name, unsigned int *n_subexprs);
/*
 * Parse a unary minus expression of the strict form "-(subexpr)".
 * Explicit parentheses are required, and the construct must span the
 * whole remaining string (unary minus is not allowed inside a larger
 * sub-expression).
 *
 * Returns the expression or an ERR_PTR(); never NULL.
 */
static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
				      struct trace_event_file *file,
				      char *str, unsigned long flags,
				      char *var_name, unsigned int *n_subexprs)
{
	struct hist_field *operand1, *expr = NULL;
	unsigned long operand_flags;
	int ret = 0;
	char *s;

	/* Unary minus operator, increment n_subexprs */
	++*n_subexprs;

	/* we support only -(xxx) i.e. explicit parens required */

	if (*n_subexprs > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		ret = -EINVAL;
		goto free;
	}

	str++; /* skip leading '-' */

	s = strchr(str, '(');
	if (s)
		str++;	/* skip the '(' */
	else {
		ret = -EINVAL;
		goto free;
	}

	s = strrchr(str, ')');
	if (s) {
		/* unary minus not supported in sub-expressions */
		if (*(s+1) != '\0') {
			hist_err(file->tr, HIST_ERR_UNARY_MINUS_SUBEXPR,
				 errpos(str));
			ret = -EINVAL;
			goto free;
		}
		/* terminate str at ')' so only the inner expr remains */
		*s = '\0';
	}
	else {
		ret = -EINVAL; /* no closing ')' */
		goto free;
	}

	flags |= HIST_FIELD_FL_EXPR;
	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free;
	}

	operand_flags = 0;
	/* Recursively parse what's inside the parentheses */
	operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
	if (IS_ERR(operand1)) {
		ret = PTR_ERR(operand1);
		goto free;
	}
	if (operand1->flags & HIST_FIELD_FL_STRING) {
		/* String type can not be the operand of unary operator. */
		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
		destroy_hist_field(operand1, 0);
		ret = -EINVAL;
		goto free;
	}

	/* Propagate timestamp qualifiers from the operand to the expr */
	expr->flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
	expr->fn_num = HIST_FIELD_FN_UMINUS;
	expr->operands[0] = operand1;
	expr->size = operand1->size;
	expr->is_signed = operand1->is_signed;
	expr->operator = FIELD_OP_UNARY_MINUS;
	expr->name = expr_str(expr, 0);
	expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
	if (!expr->type) {
		ret = -ENOMEM;
		goto free;
	}

	return expr;
 free:
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}
  2190. /*
  2191. * If the operands are var refs, return pointers the
  2192. * variable(s) referenced in var1 and var2, else NULL.
  2193. */
  2194. static int check_expr_operands(struct trace_array *tr,
  2195. struct hist_field *operand1,
  2196. struct hist_field *operand2,
  2197. struct hist_field **var1,
  2198. struct hist_field **var2)
  2199. {
  2200. unsigned long operand1_flags = operand1->flags;
  2201. unsigned long operand2_flags = operand2->flags;
  2202. if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
  2203. (operand1_flags & HIST_FIELD_FL_ALIAS)) {
  2204. struct hist_field *var;
  2205. var = find_var_field(operand1->var.hist_data, operand1->name);
  2206. if (!var)
  2207. return -EINVAL;
  2208. operand1_flags = var->flags;
  2209. *var1 = var;
  2210. }
  2211. if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
  2212. (operand2_flags & HIST_FIELD_FL_ALIAS)) {
  2213. struct hist_field *var;
  2214. var = find_var_field(operand2->var.hist_data, operand2->name);
  2215. if (!var)
  2216. return -EINVAL;
  2217. operand2_flags = var->flags;
  2218. *var2 = var;
  2219. }
  2220. if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
  2221. (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
  2222. hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0);
  2223. return -EINVAL;
  2224. }
  2225. return 0;
  2226. }
/*
 * Recursively parse a hist trigger expression string into a tree of
 * hist_fields.  @flags qualifies the resulting field (var, key, etc.),
 * @var_name names the variable being assigned (if any), and
 * @n_subexprs bounds expression complexity across the recursion.
 *
 * Returns the expression root or an ERR_PTR(); never NULL.
 */
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int *n_subexprs)
{
	struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
	struct hist_field *var1 = NULL, *var2 = NULL;
	unsigned long operand_flags, operand2_flags;
	int field_op, ret = -EINVAL;
	char *sep, *operand1_str;
	enum hist_field_fn op_fn;
	bool combine_consts;

	if (*n_subexprs > 3) {
		hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str));
		return ERR_PTR(-EINVAL);
	}

	field_op = contains_operator(str, &sep);

	if (field_op == FIELD_OP_NONE)
		return parse_atom(hist_data, file, str, &flags, var_name);

	if (field_op == FIELD_OP_UNARY_MINUS)
		return parse_unary(hist_data, file, str, flags, var_name, n_subexprs);

	/* Binary operator found, increment n_subexprs */
	++*n_subexprs;

	/* Split the expression string at the root operator */
	if (!sep)
		return ERR_PTR(-EINVAL);

	*sep = '\0';
	operand1_str = str;
	str = sep+1;

	/* Binary operator requires both operands */
	if (*operand1_str == '\0' || *str == '\0')
		return ERR_PTR(-EINVAL);

	operand_flags = 0;

	/* LHS of string is an expression e.g. a+b in a+b+c */
	operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
	if (IS_ERR(operand1))
		return ERR_CAST(operand1);

	/* Strings cannot participate in arithmetic */
	if (operand1->flags & HIST_FIELD_FL_STRING) {
		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
		ret = -EINVAL;
		goto free_op1;
	}

	/* RHS of string is another expression e.g. c in a+b+c */
	operand_flags = 0;
	operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
	if (IS_ERR(operand2)) {
		ret = PTR_ERR(operand2);
		goto free_op1;
	}
	if (operand2->flags & HIST_FIELD_FL_STRING) {
		hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
		ret = -EINVAL;
		goto free_operands;
	}

	switch (field_op) {
	case FIELD_OP_MINUS:
		op_fn = HIST_FIELD_FN_MINUS;
		break;
	case FIELD_OP_PLUS:
		op_fn = HIST_FIELD_FN_PLUS;
		break;
	case FIELD_OP_DIV:
		op_fn = HIST_FIELD_FN_DIV;
		break;
	case FIELD_OP_MULT:
		op_fn = HIST_FIELD_FN_MULT;
		break;
	default:
		ret = -EINVAL;
		goto free_operands;
	}

	/* Resolve var refs and reject mismatched timestamp units */
	ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
	if (ret)
		goto free_operands;

	operand_flags = var1 ? var1->flags : operand1->flags;
	operand2_flags = var2 ? var2->flags : operand2->flags;

	/*
	 * If both operands are constant, the expression can be
	 * collapsed to a single constant.
	 */
	combine_consts = operand_flags & operand2_flags & HIST_FIELD_FL_CONST;

	flags |= combine_consts ? HIST_FIELD_FL_CONST : HIST_FIELD_FL_EXPR;

	flags |= operand1->flags &
		(HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);

	expr = create_hist_field(hist_data, NULL, flags, var_name);
	if (!expr) {
		ret = -ENOMEM;
		goto free_operands;
	}

	operand1->read_once = true;
	operand2->read_once = true;

	/* The operands are now owned and free'd by 'expr' */
	expr->operands[0] = operand1;
	expr->operands[1] = operand2;

	if (field_op == FIELD_OP_DIV &&
	    operand2_flags & HIST_FIELD_FL_CONST) {
		u64 divisor = var2 ? var2->constant : operand2->constant;

		/* Catch constant division by zero at parse time */
		if (!divisor) {
			hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
			ret = -EDOM;
			goto free_expr;
		}

		/*
		 * Copy the divisor here so we don't have to look it up
		 * later if this is a var ref
		 */
		operand2->constant = divisor;
		op_fn = hist_field_get_div_fn(operand2);
	}

	expr->fn_num = op_fn;

	if (combine_consts) {
		if (var1)
			expr->operands[0] = var1;
		if (var2)
			expr->operands[1] = var2;

		/* Evaluate the constant expression once, right now */
		expr->constant = hist_fn_call(expr, NULL, NULL, NULL, NULL);
		expr->fn_num = HIST_FIELD_FN_CONST;

		expr->operands[0] = NULL;
		expr->operands[1] = NULL;

		/*
		 * var refs won't be destroyed immediately
		 * See: destroy_hist_field()
		 */
		destroy_hist_field(operand2, 0);
		destroy_hist_field(operand1, 0);

		expr->name = expr_str(expr, 0);
	} else {
		/* The operand sizes should be the same, so just pick one */
		expr->size = operand1->size;
		expr->is_signed = operand1->is_signed;

		expr->operator = field_op;
		expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
		if (!expr->type) {
			ret = -ENOMEM;
			goto free_expr;
		}

		expr->name = expr_str(expr, 0);
	}

	return expr;
free_operands:
	destroy_hist_field(operand2, 0);
free_op1:
	destroy_hist_field(operand1, 0);
	return ERR_PTR(ret);

free_expr:
	destroy_hist_field(expr, 0);
	return ERR_PTR(ret);
}
  2375. static char *find_trigger_filter(struct hist_trigger_data *hist_data,
  2376. struct trace_event_file *file)
  2377. {
  2378. struct event_trigger_data *test;
  2379. lockdep_assert_held(&event_mutex);
  2380. list_for_each_entry(test, &file->triggers, list) {
  2381. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  2382. if (test->private_data == hist_data)
  2383. return test->filter_str;
  2384. }
  2385. }
  2386. return NULL;
  2387. }
  2388. static struct event_command trigger_hist_cmd;
  2389. static int event_hist_trigger_parse(struct event_command *cmd_ops,
  2390. struct trace_event_file *file,
  2391. char *glob, char *cmd,
  2392. char *param_and_filter);
  2393. static bool compatible_keys(struct hist_trigger_data *target_hist_data,
  2394. struct hist_trigger_data *hist_data,
  2395. unsigned int n_keys)
  2396. {
  2397. struct hist_field *target_hist_field, *hist_field;
  2398. unsigned int n, i, j;
  2399. if (hist_data->n_fields - hist_data->n_vals != n_keys)
  2400. return false;
  2401. i = hist_data->n_vals;
  2402. j = target_hist_data->n_vals;
  2403. for (n = 0; n < n_keys; n++) {
  2404. hist_field = hist_data->fields[i + n];
  2405. target_hist_field = target_hist_data->fields[j + n];
  2406. if (strcmp(hist_field->type, target_hist_field->type) != 0)
  2407. return false;
  2408. if (hist_field->size != target_hist_field->size)
  2409. return false;
  2410. if (hist_field->is_signed != target_hist_field->is_signed)
  2411. return false;
  2412. }
  2413. return true;
  2414. }
  2415. static struct hist_trigger_data *
  2416. find_compatible_hist(struct hist_trigger_data *target_hist_data,
  2417. struct trace_event_file *file)
  2418. {
  2419. struct hist_trigger_data *hist_data;
  2420. struct event_trigger_data *test;
  2421. unsigned int n_keys;
  2422. lockdep_assert_held(&event_mutex);
  2423. n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
  2424. list_for_each_entry(test, &file->triggers, list) {
  2425. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  2426. hist_data = test->private_data;
  2427. if (compatible_keys(target_hist_data, hist_data, n_keys))
  2428. return hist_data;
  2429. }
  2430. }
  2431. return NULL;
  2432. }
  2433. static struct trace_event_file *event_file(struct trace_array *tr,
  2434. char *system, char *event_name)
  2435. {
  2436. struct trace_event_file *file;
  2437. file = __find_event_file(tr, system, event_name);
  2438. if (!file)
  2439. return ERR_PTR(-EINVAL);
  2440. return file;
  2441. }
  2442. static struct hist_field *
  2443. find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
  2444. char *system, char *event_name, char *field_name)
  2445. {
  2446. struct hist_field *event_var;
  2447. char *synthetic_name;
  2448. synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
  2449. if (!synthetic_name)
  2450. return ERR_PTR(-ENOMEM);
  2451. strcpy(synthetic_name, "synthetic_");
  2452. strcat(synthetic_name, field_name);
  2453. event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
  2454. kfree(synthetic_name);
  2455. return event_var;
  2456. }
  2457. /**
  2458. * create_field_var_hist - Automatically create a histogram and var for a field
  2459. * @target_hist_data: The target hist trigger
  2460. * @subsys_name: Optional subsystem name
  2461. * @event_name: Optional event name
  2462. * @field_name: The name of the field (and the resulting variable)
  2463. *
  2464. * Hist trigger actions fetch data from variables, not directly from
  2465. * events. However, for convenience, users are allowed to directly
  2466. * specify an event field in an action, which will be automatically
  2467. * converted into a variable on their behalf.
  2468. *
  2469. * If a user specifies a field on an event that isn't the event the
  2470. * histogram currently being defined (the target event histogram), the
  2471. * only way that can be accomplished is if a new hist trigger is
  2472. * created and the field variable defined on that.
  2473. *
  2474. * This function creates a new histogram compatible with the target
  2475. * event (meaning a histogram with the same key as the target
  2476. * histogram), and creates a variable for the specified field, but
  2477. * with 'synthetic_' prepended to the variable name in order to avoid
  2478. * collision with normal field variables.
  2479. *
  2480. * Return: The variable created for the field.
  2481. */
static struct hist_field *
create_field_var_hist(struct hist_trigger_data *target_hist_data,
		      char *subsys_name, char *event_name, char *field_name)
{
	struct trace_array *tr = target_hist_data->event_file->tr;
	struct hist_trigger_data *hist_data;
	unsigned int i, n, first = true;
	struct field_var_hist *var_hist;
	struct trace_event_file *file;
	struct hist_field *key_field;
	struct hist_field *event_var;
	char *saved_filter;
	char *cmd;
	int ret;

	if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	file = event_file(tr, subsys_name, event_name);

	if (IS_ERR(file)) {
		hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name));
		ret = PTR_ERR(file);
		return ERR_PTR(ret);
	}

	/*
	 * Look for a histogram compatible with target. We'll use the
	 * found histogram specification to create a new matching
	 * histogram with our variable on it. target_hist_data is not
	 * yet a registered histogram so we can't use that.
	 */
	hist_data = find_compatible_hist(target_hist_data, file);
	if (!hist_data) {
		hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* See if a synthetic field variable has already been created */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (!IS_ERR_OR_NULL(event_var))
		return event_var;

	var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
	if (!var_hist)
		return ERR_PTR(-ENOMEM);

	/*
	 * Build the hist trigger command string in a zeroed buffer so
	 * strcat() starts from an empty string.
	 * NOTE(review): the strcat()s below assume keys + field name +
	 * filter fit in MAX_FILTER_STR_VAL — presumably bounded by the
	 * trigger-string limits upstream; verify against the callers.
	 */
	cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
	if (!cmd) {
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Use the same keys as the compatible histogram */
	strcat(cmd, "keys=");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];
		if (!first)
			strcat(cmd, ",");
		strcat(cmd, key_field->field->name);
		first = false;
	}

	/* Create the synthetic field variable specification */
	strcat(cmd, ":synthetic_");
	strcat(cmd, field_name);
	strcat(cmd, "=");
	strcat(cmd, field_name);

	/* Use the same filter as the compatible histogram */
	saved_filter = find_trigger_filter(hist_data, file);
	if (saved_filter) {
		strcat(cmd, " if ");
		strcat(cmd, saved_filter);
	}

	/* Keep a copy of the command for later display/removal */
	var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
	if (!var_hist->cmd) {
		kfree(cmd);
		kfree(var_hist);
		return ERR_PTR(-ENOMEM);
	}

	/* Save the compatible histogram information */
	var_hist->hist_data = hist_data;

	/* Create the new histogram with our variable */
	ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
				       "", "hist", cmd);
	if (ret) {
		kfree(cmd);
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name));
		return ERR_PTR(ret);
	}

	kfree(cmd);

	/* If we can't find the variable, something went wrong */
	event_var = find_synthetic_field_var(target_hist_data, subsys_name,
					     event_name, field_name);
	if (IS_ERR_OR_NULL(event_var)) {
		kfree(var_hist->cmd);
		kfree(var_hist);
		hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name));
		return ERR_PTR(-EINVAL);
	}

	/* Record the created histogram so it can be torn down later */
	n = target_hist_data->n_field_var_hists;
	target_hist_data->field_var_hists[n] = var_hist;
	target_hist_data->n_field_var_hists++;

	return event_var;
}
  2583. static struct hist_field *
  2584. find_target_event_var(struct hist_trigger_data *hist_data,
  2585. char *subsys_name, char *event_name, char *var_name)
  2586. {
  2587. struct trace_event_file *file = hist_data->event_file;
  2588. struct hist_field *hist_field = NULL;
  2589. if (subsys_name) {
  2590. struct trace_event_call *call;
  2591. if (!event_name)
  2592. return NULL;
  2593. call = file->event_call;
  2594. if (strcmp(subsys_name, call->class->system) != 0)
  2595. return NULL;
  2596. if (strcmp(event_name, trace_event_name(call)) != 0)
  2597. return NULL;
  2598. }
  2599. hist_field = find_var_field(hist_data, var_name);
  2600. return hist_field;
  2601. }
/*
 * Evaluate each field variable's value for the current event record
 * and store it in the per-element tracing map variable slot.  String
 * and stacktrace values are copied into per-element string storage
 * (slots indexed from @field_var_str_start) and the variable is set
 * to a pointer to that storage instead of the raw value.
 */
static inline void __update_field_vars(struct tracing_map_elt *elt,
				       struct trace_buffer *buffer,
				       struct ring_buffer_event *rbe,
				       void *rec,
				       struct field_var **field_vars,
				       unsigned int n_field_vars,
				       unsigned int field_var_str_start)
{
	struct hist_elt_data *elt_data = elt->private_data;
	unsigned int i, j, var_idx;
	u64 var_val;

	/* Make sure stacktrace can fit in the string variable length */
	BUILD_BUG_ON((HIST_STACKTRACE_DEPTH + 1) * sizeof(long) >= STR_VAR_LEN_MAX);

	for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
		struct field_var *field_var = field_vars[i];
		struct hist_field *var = field_var->var;
		struct hist_field *val = field_var->val;

		var_val = hist_fn_call(val, elt, buffer, rbe, rec);
		var_idx = var->var.idx;

		if (val->flags & (HIST_FIELD_FL_STRING |
				  HIST_FIELD_FL_STACKTRACE)) {
			char *str = elt_data->field_var_str[j++];
			char *val_str = (char *)(uintptr_t)var_val;
			unsigned int size;

			if (val->flags & HIST_FIELD_FL_STRING) {
				size = min(val->size, STR_VAR_LEN_MAX);
				strscpy(str, val_str, size);
			} else {
				char *stack_start = str + sizeof(unsigned long);
				int e;

				/*
				 * Storage layout: leading unsigned long
				 * holds the number of saved entries,
				 * followed by the stack entries.
				 */
				e = stack_trace_save((void *)stack_start,
						     HIST_STACKTRACE_DEPTH,
						     HIST_STACKTRACE_SKIP);
				if (e < HIST_STACKTRACE_DEPTH - 1)
					((unsigned long *)stack_start)[e] = 0;
				*((unsigned long *)str) = e;
			}
			/* Variable now points at the per-element copy */
			var_val = (u64)(uintptr_t)str;
		}
		tracing_map_set_var(elt, var_idx, var_val);
	}
}
/* Update all of this trigger's field variables from the current record. */
static void update_field_vars(struct hist_trigger_data *hist_data,
			      struct tracing_map_elt *elt,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *rbe,
			      void *rec)
{
	__update_field_vars(elt, buffer, rbe, rec, hist_data->field_vars,
			    hist_data->n_field_vars, 0);
}
/*
 * Action handler: store the 'save()' variables for this element.  The
 * save vars' string storage follows the field vars' storage, hence
 * the n_field_var_str starting offset.
 */
static void save_track_data_vars(struct hist_trigger_data *hist_data,
				 struct tracing_map_elt *elt,
				 struct trace_buffer *buffer, void *rec,
				 struct ring_buffer_event *rbe, void *key,
				 struct action_data *data, u64 *var_ref_vals)
{
	__update_field_vars(elt, buffer, rbe, rec, hist_data->save_vars,
			    hist_data->n_save_vars, hist_data->n_field_var_str);
}
  2662. static struct hist_field *create_var(struct hist_trigger_data *hist_data,
  2663. struct trace_event_file *file,
  2664. char *name, int size, const char *type)
  2665. {
  2666. struct hist_field *var;
  2667. int idx;
  2668. if (find_var(hist_data, file, name) && !hist_data->remove) {
  2669. var = ERR_PTR(-EINVAL);
  2670. goto out;
  2671. }
  2672. var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
  2673. if (!var) {
  2674. var = ERR_PTR(-ENOMEM);
  2675. goto out;
  2676. }
  2677. idx = tracing_map_add_var(hist_data->map);
  2678. if (idx < 0) {
  2679. kfree(var);
  2680. var = ERR_PTR(-EINVAL);
  2681. goto out;
  2682. }
  2683. var->ref = 1;
  2684. var->flags = HIST_FIELD_FL_VAR;
  2685. var->var.idx = idx;
  2686. var->var.hist_data = var->hist_data = hist_data;
  2687. var->size = size;
  2688. var->var.name = kstrdup(name, GFP_KERNEL);
  2689. var->type = kstrdup_const(type, GFP_KERNEL);
  2690. if (!var->var.name || !var->type) {
  2691. kfree_const(var->type);
  2692. kfree(var->var.name);
  2693. kfree(var);
  2694. var = ERR_PTR(-ENOMEM);
  2695. }
  2696. out:
  2697. return var;
  2698. }
/*
 * Create a "field variable": a hidden variable holding the value of
 * the given event field, so hist trigger actions can fetch the field
 * through the normal variable mechanism.  Returns the field_var or an
 * ERR_PTR(); the caller owns (and eventually frees) the result.
 */
static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
					  struct trace_event_file *file,
					  char *field_name)
{
	struct hist_field *val = NULL, *var = NULL;
	unsigned long flags = HIST_FIELD_FL_VAR;
	struct trace_array *tr = file->tr;
	struct field_var *field_var;
	int ret = 0;

	if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
		hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name));
		ret = -EINVAL;
		goto err;
	}

	/* 'val' reads the event field's value at event time */
	val = parse_atom(hist_data, file, field_name, &flags, NULL);
	if (IS_ERR(val)) {
		hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name));
		ret = PTR_ERR(val);
		goto err;
	}

	/* 'var' is the variable the value gets stored into */
	var = create_var(hist_data, file, field_name, val->size, val->type);
	if (IS_ERR(var)) {
		hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name));
		kfree(val);
		ret = PTR_ERR(var);
		goto err;
	}

	field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
	if (!field_var) {
		kfree(val);
		kfree(var);
		ret = -ENOMEM;
		goto err;
	}

	field_var->var = var;
	field_var->val = val;
 out:
	return field_var;
 err:
	field_var = ERR_PTR(ret);
	goto out;
}
  2741. /**
  2742. * create_target_field_var - Automatically create a variable for a field
  2743. * @target_hist_data: The target hist trigger
  2744. * @subsys_name: Optional subsystem name
  2745. * @event_name: Optional event name
  2746. * @var_name: The name of the field (and the resulting variable)
  2747. *
  2748. * Hist trigger actions fetch data from variables, not directly from
  2749. * events. However, for convenience, users are allowed to directly
  2750. * specify an event field in an action, which will be automatically
  2751. * converted into a variable on their behalf.
  2752. *
  2753. * This function creates a field variable with the name var_name on
  2754. * the hist trigger currently being defined on the target event. If
  2755. * subsys_name and event_name are specified, this function simply
  2756. * verifies that they do in fact match the target event subsystem and
  2757. * event name.
  2758. *
  2759. * Return: The variable created for the field.
  2760. */
  2761. static struct field_var *
  2762. create_target_field_var(struct hist_trigger_data *target_hist_data,
  2763. char *subsys_name, char *event_name, char *var_name)
  2764. {
  2765. struct trace_event_file *file = target_hist_data->event_file;
  2766. if (subsys_name) {
  2767. struct trace_event_call *call;
  2768. if (!event_name)
  2769. return NULL;
  2770. call = file->event_call;
  2771. if (strcmp(subsys_name, call->class->system) != 0)
  2772. return NULL;
  2773. if (strcmp(event_name, trace_event_name(call)) != 0)
  2774. return NULL;
  2775. }
  2776. return create_field_var(target_hist_data, file, var_name);
  2777. }
  2778. static bool check_track_val_max(u64 track_val, u64 var_val)
  2779. {
  2780. if (var_val <= track_val)
  2781. return false;
  2782. return true;
  2783. }
  2784. static bool check_track_val_changed(u64 track_val, u64 var_val)
  2785. {
  2786. if (var_val == track_val)
  2787. return false;
  2788. return true;
  2789. }
  2790. static u64 get_track_val(struct hist_trigger_data *hist_data,
  2791. struct tracing_map_elt *elt,
  2792. struct action_data *data)
  2793. {
  2794. unsigned int track_var_idx = data->track_data.track_var->var.idx;
  2795. u64 track_val;
  2796. track_val = tracing_map_read_var(elt, track_var_idx);
  2797. return track_val;
  2798. }
  2799. static void save_track_val(struct hist_trigger_data *hist_data,
  2800. struct tracing_map_elt *elt,
  2801. struct action_data *data, u64 var_val)
  2802. {
  2803. unsigned int track_var_idx = data->track_data.track_var->var.idx;
  2804. tracing_map_set_var(elt, track_var_idx, var_val);
  2805. }
  2806. static void save_track_data(struct hist_trigger_data *hist_data,
  2807. struct tracing_map_elt *elt,
  2808. struct trace_buffer *buffer, void *rec,
  2809. struct ring_buffer_event *rbe, void *key,
  2810. struct action_data *data, u64 *var_ref_vals)
  2811. {
  2812. if (data->track_data.save_data)
  2813. data->track_data.save_data(hist_data, elt, buffer, rec, rbe,
  2814. key, data, var_ref_vals);
  2815. }
  2816. static bool check_track_val(struct tracing_map_elt *elt,
  2817. struct action_data *data,
  2818. u64 var_val)
  2819. {
  2820. struct hist_trigger_data *hist_data;
  2821. u64 track_val;
  2822. hist_data = data->track_data.track_var->hist_data;
  2823. track_val = get_track_val(hist_data, elt, data);
  2824. return data->track_data.check_val(track_val, var_val);
  2825. }
  2826. #ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Decide whether a conditional snapshot should actually be taken for
 * the triggering event.  Re-checks the tracked condition and, if it
 * still holds, copies the triggering element's key and comm into the
 * snapshot's track_data for later display.
 *
 * Returns true to take the snapshot, false to skip it.
 */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	/* called with tr->max_lock held */
	struct track_data *track_data = tr->cond_snapshot->cond_data;
	struct hist_elt_data *elt_data, *track_elt_data;
	struct snapshot_context *context = cond_data;
	struct action_data *action;
	u64 track_val;

	if (!track_data)
		return false;

	action = track_data->action_data;

	/* Re-read the tracked variable for the triggering map element. */
	track_val = get_track_val(track_data->hist_data, context->elt,
				  track_data->action_data);

	/* Only snapshot if the onmax/onchange condition still holds. */
	if (!action->track_data.check_val(track_data->track_val, track_val))
		return false;

	track_data->track_val = track_val;
	memcpy(track_data->key, context->key, track_data->key_len);

	/* Preserve the triggering task's comm for the snapshot report. */
	elt_data = context->elt->private_data;
	track_elt_data = track_data->elt.private_data;
	if (elt_data->comm)
		strncpy(track_elt_data->comm, elt_data->comm, TASK_COMM_LEN);

	track_data->updated = true;

	return true;
}
  2851. static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
  2852. struct tracing_map_elt *elt,
  2853. struct trace_buffer *buffer, void *rec,
  2854. struct ring_buffer_event *rbe, void *key,
  2855. struct action_data *data,
  2856. u64 *var_ref_vals)
  2857. {
  2858. struct trace_event_file *file = hist_data->event_file;
  2859. struct snapshot_context context;
  2860. context.elt = elt;
  2861. context.key = key;
  2862. tracing_snapshot_cond(file->tr, &context);
  2863. }
  2864. static void hist_trigger_print_key(struct seq_file *m,
  2865. struct hist_trigger_data *hist_data,
  2866. void *key,
  2867. struct tracing_map_elt *elt);
  2868. static struct action_data *snapshot_action(struct hist_trigger_data *hist_data)
  2869. {
  2870. unsigned int i;
  2871. if (!hist_data->n_actions)
  2872. return NULL;
  2873. for (i = 0; i < hist_data->n_actions; i++) {
  2874. struct action_data *data = hist_data->actions[i];
  2875. if (data->action == ACTION_SNAPSHOT)
  2876. return data;
  2877. }
  2878. return NULL;
  2879. }
  2880. static void track_data_snapshot_print(struct seq_file *m,
  2881. struct hist_trigger_data *hist_data)
  2882. {
  2883. struct trace_event_file *file = hist_data->event_file;
  2884. struct track_data *track_data;
  2885. struct action_data *action;
  2886. track_data = tracing_cond_snapshot_data(file->tr);
  2887. if (!track_data)
  2888. return;
  2889. if (!track_data->updated)
  2890. return;
  2891. action = snapshot_action(hist_data);
  2892. if (!action)
  2893. return;
  2894. seq_puts(m, "\nSnapshot taken (see tracing/snapshot). Details:\n");
  2895. seq_printf(m, "\ttriggering value { %s(%s) }: %10llu",
  2896. action->handler == HANDLER_ONMAX ? "onmax" : "onchange",
  2897. action->track_data.var_str, track_data->track_val);
  2898. seq_puts(m, "\ttriggered by event with key: ");
  2899. hist_trigger_print_key(m, hist_data, track_data->key, &track_data->elt);
  2900. seq_putc(m, '\n');
  2901. }
  2902. #else
/* !CONFIG_TRACER_SNAPSHOT: conditional snapshots can never trigger. */
static bool cond_snapshot_update(struct trace_array *tr, void *cond_data)
{
	return false;
}
/* No-op stubs so callers don't need CONFIG_TRACER_SNAPSHOT #ifdefs. */
static void save_track_data_snapshot(struct hist_trigger_data *hist_data,
				     struct tracing_map_elt *elt,
				     struct trace_buffer *buffer, void *rec,
				     struct ring_buffer_event *rbe, void *key,
				     struct action_data *data,
				     u64 *var_ref_vals) {}
static void track_data_snapshot_print(struct seq_file *m,
				      struct hist_trigger_data *hist_data) {}
  2915. #endif /* CONFIG_TRACER_SNAPSHOT */
  2916. static void track_data_print(struct seq_file *m,
  2917. struct hist_trigger_data *hist_data,
  2918. struct tracing_map_elt *elt,
  2919. struct action_data *data)
  2920. {
  2921. u64 track_val = get_track_val(hist_data, elt, data);
  2922. unsigned int i, save_var_idx;
  2923. if (data->handler == HANDLER_ONMAX)
  2924. seq_printf(m, "\n\tmax: %10llu", track_val);
  2925. else if (data->handler == HANDLER_ONCHANGE)
  2926. seq_printf(m, "\n\tchanged: %10llu", track_val);
  2927. if (data->action == ACTION_SNAPSHOT)
  2928. return;
  2929. for (i = 0; i < hist_data->n_save_vars; i++) {
  2930. struct hist_field *save_val = hist_data->save_vars[i]->val;
  2931. struct hist_field *save_var = hist_data->save_vars[i]->var;
  2932. u64 val;
  2933. save_var_idx = save_var->var.idx;
  2934. val = tracing_map_read_var(elt, save_var_idx);
  2935. if (save_val->flags & HIST_FIELD_FL_STRING) {
  2936. seq_printf(m, " %s: %-32s", save_var->var.name,
  2937. (char *)(uintptr_t)(val));
  2938. } else
  2939. seq_printf(m, " %s: %10llu", save_var->var.name, val);
  2940. }
  2941. }
  2942. static void ontrack_action(struct hist_trigger_data *hist_data,
  2943. struct tracing_map_elt *elt,
  2944. struct trace_buffer *buffer, void *rec,
  2945. struct ring_buffer_event *rbe, void *key,
  2946. struct action_data *data, u64 *var_ref_vals)
  2947. {
  2948. u64 var_val = var_ref_vals[data->track_data.var_ref->var_ref_idx];
  2949. if (check_track_val(elt, data, var_val)) {
  2950. save_track_val(hist_data, elt, data, var_val);
  2951. save_track_data(hist_data, elt, buffer, rec, rbe,
  2952. key, data, var_ref_vals);
  2953. }
  2954. }
  2955. static void action_data_destroy(struct action_data *data)
  2956. {
  2957. unsigned int i;
  2958. lockdep_assert_held(&event_mutex);
  2959. kfree(data->action_name);
  2960. for (i = 0; i < data->n_params; i++)
  2961. kfree(data->params[i]);
  2962. if (data->synth_event)
  2963. data->synth_event->ref--;
  2964. kfree(data->synth_event_name);
  2965. kfree(data);
  2966. }
  2967. static void track_data_destroy(struct hist_trigger_data *hist_data,
  2968. struct action_data *data)
  2969. {
  2970. struct trace_event_file *file = hist_data->event_file;
  2971. destroy_hist_field(data->track_data.track_var, 0);
  2972. if (data->action == ACTION_SNAPSHOT) {
  2973. struct track_data *track_data;
  2974. track_data = tracing_cond_snapshot_data(file->tr);
  2975. if (track_data && track_data->hist_data == hist_data) {
  2976. tracing_snapshot_cond_disable(file->tr);
  2977. track_data_free(track_data);
  2978. }
  2979. }
  2980. kfree(data->track_data.var_str);
  2981. action_data_destroy(data);
  2982. }
  2983. static int action_create(struct hist_trigger_data *hist_data,
  2984. struct action_data *data);
  2985. static int track_data_create(struct hist_trigger_data *hist_data,
  2986. struct action_data *data)
  2987. {
  2988. struct hist_field *var_field, *ref_field, *track_var = NULL;
  2989. struct trace_event_file *file = hist_data->event_file;
  2990. struct trace_array *tr = file->tr;
  2991. char *track_data_var_str;
  2992. int ret = 0;
  2993. track_data_var_str = data->track_data.var_str;
  2994. if (track_data_var_str[0] != '$') {
  2995. hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str));
  2996. return -EINVAL;
  2997. }
  2998. track_data_var_str++;
  2999. var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str);
  3000. if (!var_field) {
  3001. hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str));
  3002. return -EINVAL;
  3003. }
  3004. ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
  3005. if (!ref_field)
  3006. return -ENOMEM;
  3007. data->track_data.var_ref = ref_field;
  3008. if (data->handler == HANDLER_ONMAX)
  3009. track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64");
  3010. if (IS_ERR(track_var)) {
  3011. hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
  3012. ret = PTR_ERR(track_var);
  3013. goto out;
  3014. }
  3015. if (data->handler == HANDLER_ONCHANGE)
  3016. track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64");
  3017. if (IS_ERR(track_var)) {
  3018. hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0);
  3019. ret = PTR_ERR(track_var);
  3020. goto out;
  3021. }
  3022. data->track_data.track_var = track_var;
  3023. ret = action_create(hist_data, data);
  3024. out:
  3025. return ret;
  3026. }
/*
 * Split a comma-separated action parameter list and stash duplicated
 * copies in data->params[].  For trace(synth_event, ...), the first
 * parameter is the synthetic event's name, recorded separately in
 * data->synth_event_name rather than as a field param.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on error; params saved so far
 * are freed later by action_data_destroy().
 */
static int parse_action_params(struct trace_array *tr, char *params,
			       struct action_data *data)
{
	char *param, *saved_param;
	bool first_param = true;
	int ret = 0;

	while (params) {
		/* Cap the param count at the synth-event field maximum. */
		if (data->n_params >= SYNTH_FIELDS_MAX) {
			hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		param = strsep(&params, ",");
		if (!param) {
			hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0);
			ret = -EINVAL;
			goto out;
		}

		param = strstrip(param);

		/* A meaningful param needs at least two chars (e.g. "$v"). */
		if (strlen(param) < 2) {
			hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param));
			ret = -EINVAL;
			goto out;
		}

		saved_param = kstrdup(param, GFP_KERNEL);
		if (!saved_param) {
			ret = -ENOMEM;
			goto out;
		}

		/* With the "trace" keyword, param #1 names the synth event. */
		if (first_param && data->use_trace_keyword) {
			data->synth_event_name = saved_param;
			first_param = false;
			continue;
		}
		first_param = false;

		data->params[data->n_params++] = saved_param;
	}
 out:
	return ret;
}
/*
 * Parse the ".action(params)" suffix of an onmax/onchange/onmatch
 * string: one of save(...), snapshot() or a synth event generation
 * (trace(name,...) or name(...)).  Sets data->fn, data->action, the
 * handler-specific check/save callbacks and data->action_name.
 *
 * Returns 0 on success or a negative error.
 */
static int action_parse(struct trace_array *tr, char *str, struct action_data *data,
			enum handler_id handler)
{
	char *action_name;
	int ret = 0;

	/* Skip past the "onXXX(...)" prefix to reach ".action(". */
	strsep(&str, ".");
	if (!str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	action_name = strsep(&str, "(");
	if (!action_name || !str) {
		hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0);
		ret = -EINVAL;
		goto out;
	}

	if (str_has_prefix(action_name, "save")) {
		char *params = strsep(&str, ")");

		if (!params) {
			hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0);
			ret = -EINVAL;
			goto out;
		}

		ret = parse_action_params(tr, params, data);
		if (ret)
			goto out;

		/* save() is only valid with the onmax/onchange handlers. */
		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_vars;
		data->fn = ontrack_action;
		data->action = ACTION_SAVE;
	} else if (str_has_prefix(action_name, "snapshot")) {
		char *params = strsep(&str, ")");

		/* snapshot() takes no params but still needs the ')'. */
		if (!str) {
			hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params));
			ret = -EINVAL;
			goto out;
		}

		/* snapshot() is only valid with the onmax/onchange handlers. */
		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;
		else {
			hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name));
			ret = -EINVAL;
			goto out;
		}

		data->track_data.save_data = save_track_data_snapshot;
		data->fn = ontrack_action;
		data->action = ACTION_SNAPSHOT;
	} else {
		/* Anything else names a synthetic event to generate. */
		char *params = strsep(&str, ")");

		if (str_has_prefix(action_name, "trace"))
			data->use_trace_keyword = true;

		if (params) {
			ret = parse_action_params(tr, params, data);
			if (ret)
				goto out;
		}

		if (handler == HANDLER_ONMAX)
			data->track_data.check_val = check_track_val_max;
		else if (handler == HANDLER_ONCHANGE)
			data->track_data.check_val = check_track_val_changed;

		/* onmax/onchange generate the event via ontrack_action(). */
		if (handler != HANDLER_ONMATCH) {
			data->track_data.save_data = action_trace;
			data->fn = ontrack_action;
		} else
			data->fn = action_trace;

		data->action = ACTION_TRACE;
	}

	data->action_name = kstrdup(action_name, GFP_KERNEL);
	if (!data->action_name) {
		ret = -ENOMEM;
		goto out;
	}

	data->handler = handler;
 out:
	return ret;
}
/*
 * Parse "var).action(...)" for onmax/onchange: duplicate the tracked
 * variable string and hand the remainder to action_parse().
 *
 * Returns the new action_data or an ERR_PTR.  On failure, partially
 * initialized state is torn down via track_data_destroy(), which
 * tolerates NULL members.
 */
static struct action_data *track_data_parse(struct hist_trigger_data *hist_data,
					    char *str, enum handler_id handler)
{
	struct action_data *data;
	int ret = -EINVAL;
	char *var_str;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	var_str = strsep(&str, ")");
	if (!var_str || !str) {
		ret = -EINVAL;
		goto free;
	}

	data->track_data.var_str = kstrdup(var_str, GFP_KERNEL);
	if (!data->track_data.var_str) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(hist_data->event_file->tr, str, data, handler);
	if (ret)
		goto free;
 out:
	return data;
 free:
	track_data_destroy(hist_data, data);
	data = ERR_PTR(ret);
	goto out;
}
  3183. static void onmatch_destroy(struct action_data *data)
  3184. {
  3185. kfree(data->match_data.event);
  3186. kfree(data->match_data.event_system);
  3187. action_data_destroy(data);
  3188. }
  3189. static void destroy_field_var(struct field_var *field_var)
  3190. {
  3191. if (!field_var)
  3192. return;
  3193. destroy_hist_field(field_var->var, 0);
  3194. destroy_hist_field(field_var->val, 0);
  3195. kfree(field_var);
  3196. }
  3197. static void destroy_field_vars(struct hist_trigger_data *hist_data)
  3198. {
  3199. unsigned int i;
  3200. for (i = 0; i < hist_data->n_field_vars; i++)
  3201. destroy_field_var(hist_data->field_vars[i]);
  3202. for (i = 0; i < hist_data->n_save_vars; i++)
  3203. destroy_field_var(hist_data->save_vars[i]);
  3204. }
/*
 * Register a newly created field var with the histogram and account
 * for its per-element string storage if its value is a string or a
 * stack trace.
 */
static void save_field_var(struct hist_trigger_data *hist_data,
			   struct field_var *field_var)
{
	hist_data->field_vars[hist_data->n_field_vars++] = field_var;

	/* Stack traces are saved in the string storage too */
	if (field_var->val->flags & (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
		hist_data->n_field_var_str++;
}
  3213. static int check_synth_field(struct synth_event *event,
  3214. struct hist_field *hist_field,
  3215. unsigned int field_pos)
  3216. {
  3217. struct synth_field *field;
  3218. if (field_pos >= event->n_fields)
  3219. return -EINVAL;
  3220. field = event->fields[field_pos];
  3221. /*
  3222. * A dynamic string synth field can accept static or
  3223. * dynamic. A static string synth field can only accept a
  3224. * same-sized static string, which is checked for later.
  3225. */
  3226. if (strstr(hist_field->type, "char[") && field->is_string
  3227. && field->is_dynamic)
  3228. return 0;
  3229. if (strstr(hist_field->type, "long[") && field->is_stack)
  3230. return 0;
  3231. if (strcmp(field->type, hist_field->type) != 0) {
  3232. if (field->size != hist_field->size ||
  3233. (!field->is_string && field->is_signed != hist_field->is_signed))
  3234. return -EINVAL;
  3235. }
  3236. return 0;
  3237. }
/*
 * Resolve a "$var" action parameter to a histogram variable.  First
 * search the target (current) event; if not found and no system was
 * given, for onmatch fall back to the matched event's system/event
 * before searching other events.
 *
 * Returns the variable's hist_field, or NULL (after logging an error).
 */
static struct hist_field *
trace_action_find_var(struct hist_trigger_data *hist_data,
		      struct action_data *data,
		      char *system, char *event, char *var)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field;

	var++; /* skip '$' */

	hist_field = find_target_event_var(hist_data, system, event, var);
	if (!hist_field) {
		/* Default unqualified vars to the onmatch()'d event. */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		hist_field = find_event_var(hist_data, system, event, var);
	}

	if (!hist_field)
		hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var));

	return hist_field;
}
/*
 * Resolve an action parameter that names an event field (not a "$var")
 * by creating a variable for it — either directly on the target event,
 * or via a new hist trigger on the named event.
 *
 * Returns the variable hist_field, or NULL on failure.
 */
static struct hist_field *
trace_action_create_field_var(struct hist_trigger_data *hist_data,
			      struct action_data *data, char *system,
			      char *event, char *var)
{
	struct hist_field *hist_field = NULL;
	struct field_var *field_var;

	/*
	 * First try to create a field var on the target event (the
	 * currently being defined).  This will create a variable for
	 * unqualified fields on the target event, or if qualified,
	 * target fields that have qualified names matching the target.
	 */
	field_var = create_target_field_var(hist_data, system, event, var);

	if (field_var && !IS_ERR(field_var)) {
		save_field_var(hist_data, field_var);
		hist_field = field_var->var;
	} else {
		field_var = NULL;
		/*
		 * If no explicit system.event is specified, default to
		 * looking for fields on the onmatch(system.event.xxx)
		 * event.
		 */
		if (!system && data->handler == HANDLER_ONMATCH) {
			system = data->match_data.event_system;
			event = data->match_data.event;
		}

		/* Without an event name there is nowhere to hang the var. */
		if (!event)
			goto free;

		/*
		 * At this point, we're looking at a field on another
		 * event.  Because we can't modify a hist trigger on
		 * another event to add a variable for a field, we need
		 * to create a new trigger on that event and create the
		 * variable at the same time.
		 */
		hist_field = create_field_var_hist(hist_data, system, event, var);
		if (IS_ERR(hist_field))
			goto free;
	}
 out:
	return hist_field;
 free:
	destroy_field_var(field_var);
	hist_field = NULL;
	goto out;
}
/*
 * Bind an action's parameters to the fields of the named synthetic
 * event: resolve each param to a variable (creating field vars as
 * needed), type-check it against the corresponding synth field, and
 * record the var_ref index used at trigger time in data->var_ref_idx[].
 *
 * Takes a reference on the synth event; dropped again on any error.
 * Returns 0 on success or a negative error.
 */
static int trace_action_create(struct hist_trigger_data *hist_data,
			       struct action_data *data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *event_name, *param, *system = NULL;
	struct hist_field *hist_field, *var_ref;
	unsigned int i;
	unsigned int field_pos = 0;
	struct synth_event *event;
	char *synth_event_name;
	int var_ref_idx, ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Sanity check to avoid out-of-bound write on 'data->var_ref_idx' */
	if (data->n_params > SYNTH_FIELDS_MAX)
		return -EINVAL;

	/* trace(name, ...) carries the name separately from action_name. */
	if (data->use_trace_keyword)
		synth_event_name = data->synth_event_name;
	else
		synth_event_name = data->action_name;

	event = find_synth_event(synth_event_name);
	if (!event) {
		hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name));
		return -EINVAL;
	}

	event->ref++;

	for (i = 0; i < data->n_params; i++) {
		char *p;

		p = param = kstrdup(data->params[i], GFP_KERNEL);
		if (!param) {
			ret = -ENOMEM;
			goto err;
		}

		/* Split an optional "system.event." qualifier off the param. */
		system = strsep(&param, ".");
		if (!param) {
			param = (char *)system;
			system = event_name = NULL;
		} else {
			event_name = strsep(&param, ".");
			if (!param) {
				kfree(p);
				ret = -EINVAL;
				goto err;
			}
		}

		/* "$var" refs an existing variable; else create a field var. */
		if (param[0] == '$')
			hist_field = trace_action_find_var(hist_data, data,
							   system, event_name,
							   param);
		else
			hist_field = trace_action_create_field_var(hist_data,
								   data,
								   system,
								   event_name,
								   param);

		if (!hist_field) {
			kfree(p);
			ret = -EINVAL;
			goto err;
		}

		if (check_synth_field(event, hist_field, field_pos) == 0) {
			var_ref = create_var_ref(hist_data, hist_field,
						 system, event_name);
			if (!var_ref) {
				kfree(p);
				ret = -ENOMEM;
				goto err;
			}

			/* Remember which var_ref slot feeds synth field i. */
			var_ref_idx = find_var_ref_idx(hist_data, var_ref);
			if (WARN_ON(var_ref_idx < 0)) {
				kfree(p);
				ret = var_ref_idx;
				goto err;
			}

			data->var_ref_idx[i] = var_ref_idx;

			field_pos++;
			kfree(p);
			continue;
		}

		hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param));
		kfree(p);
		ret = -EINVAL;
		goto err;
	}

	/* Every synth event field must have been matched by a param. */
	if (field_pos != event->n_fields) {
		hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name));
		ret = -EINVAL;
		goto err;
	}

	data->synth_event = event;
 out:
	return ret;
 err:
	event->ref--;

	goto out;
}
/*
 * Second-stage setup for a parsed action, dispatched by type:
 * ACTION_TRACE binds params to a synth event, ACTION_SNAPSHOT arms the
 * conditional snapshot, and ACTION_SAVE creates field vars for the
 * fields to be saved.
 *
 * Returns 0 on success or a negative error.
 */
static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data)
{
	struct trace_event_file *file = hist_data->event_file;
	struct trace_array *tr = file->tr;
	struct track_data *track_data;
	struct field_var *field_var;
	unsigned int i;
	char *param;
	int ret = 0;

	if (data->action == ACTION_TRACE)
		return trace_action_create(hist_data, data);

	if (data->action == ACTION_SNAPSHOT) {
		track_data = track_data_alloc(hist_data->key_size, data, hist_data);
		if (IS_ERR(track_data)) {
			ret = PTR_ERR(track_data);
			goto out;
		}

		ret = tracing_snapshot_cond_enable(file->tr, track_data,
						   cond_snapshot_update);
		/* On failure we still own track_data; free it ourselves. */
		if (ret)
			track_data_free(track_data);

		goto out;
	}

	if (data->action == ACTION_SAVE) {
		/* Only one save() action is allowed per histogram. */
		if (hist_data->n_save_vars) {
			ret = -EEXIST;
			hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0);
			goto out;
		}

		for (i = 0; i < data->n_params; i++) {
			param = kstrdup(data->params[i], GFP_KERNEL);
			if (!param) {
				ret = -ENOMEM;
				goto out;
			}

			field_var = create_target_field_var(hist_data, NULL, NULL, param);
			if (IS_ERR(field_var)) {
				hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL,
					 errpos(param));
				ret = PTR_ERR(field_var);
				kfree(param);
				goto out;
			}

			hist_data->save_vars[hist_data->n_save_vars++] = field_var;
			/* Strings and stack traces consume string storage. */
			if (field_var->val->flags &
			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
				hist_data->n_save_var_str++;
			kfree(param);
		}
	}
 out:
	return ret;
}
/* onmatch action setup is entirely shared with onmax/onchange. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
/*
 * Parse "onmatch(system.event).action(...)": validate and record the
 * matched system/event pair, then parse the trailing action.
 *
 * Returns the new action_data or an ERR_PTR; partially built state is
 * freed via onmatch_destroy() on error.
 */
static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
{
	char *match_event, *match_event_system;
	struct action_data *data;
	int ret = -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	match_event = strsep(&str, ")");
	if (!match_event || !str) {
		hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event));
		goto free;
	}

	match_event_system = strsep(&match_event, ".");
	if (!match_event) {
		hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system));
		goto free;
	}

	/* The matched event must actually exist in this trace instance. */
	if (IS_ERR(event_file(tr, match_event_system, match_event))) {
		hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event));
		goto free;
	}

	data->match_data.event = kstrdup(match_event, GFP_KERNEL);
	if (!data->match_data.event) {
		ret = -ENOMEM;
		goto free;
	}

	data->match_data.event_system = kstrdup(match_event_system, GFP_KERNEL);
	if (!data->match_data.event_system) {
		ret = -ENOMEM;
		goto free;
	}

	ret = action_parse(tr, str, data, HANDLER_ONMATCH);
	if (ret)
		goto free;
 out:
	return data;
 free:
	onmatch_destroy(data);
	data = ERR_PTR(ret);
	goto out;
}
  3502. static int create_hitcount_val(struct hist_trigger_data *hist_data)
  3503. {
  3504. hist_data->fields[HITCOUNT_IDX] =
  3505. create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
  3506. if (!hist_data->fields[HITCOUNT_IDX])
  3507. return -ENOMEM;
  3508. hist_data->n_vals++;
  3509. hist_data->n_fields++;
  3510. if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
  3511. return -EINVAL;
  3512. return 0;
  3513. }
/*
 * Parse a value or variable expression and install the resulting
 * hist_field at fields[val_idx].  Rejects display modifiers that make
 * no sense on values/variables.  Returns 0 or a negative error.
 */
static int __create_val_field(struct hist_trigger_data *hist_data,
			      unsigned int val_idx,
			      struct trace_event_file *file,
			      char *var_name, char *field_str,
			      unsigned long flags)
{
	struct hist_field *hist_field;
	int ret = 0, n_subexprs = 0;

	hist_field = parse_expr(hist_data, file, field_str, flags, var_name, &n_subexprs);
	if (IS_ERR(hist_field)) {
		ret = PTR_ERR(hist_field);
		goto out;
	}

	/* values and variables should not have some modifiers */
	if (hist_field->flags & HIST_FIELD_FL_VAR) {
		/* Variable */
		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
			goto err;
	} else {
		/* Value */
		if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
					 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
					 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
					 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
			goto err;
	}

	hist_data->fields[val_idx] = hist_field;

	++hist_data->n_vals;
	++hist_data->n_fields;

	/* Variables share this path, so the bound includes VARS_MAX too. */
	if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		ret = -EINVAL;
 out:
	return ret;
 err:
	hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
	return -EINVAL;
}
/*
 * Create an ordinary (non-variable) value field at val_idx from a
 * "vals=" list entry.
 */
static int create_val_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *field_str)
{
	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
		return -EINVAL;

	return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
}
/* Placeholder returned when no saved comm is available. */
static const char no_comm[] = "(no comm)";

/*
 * Return the saved comm string for the map element, as its address
 * cast to u64 (all hist_field accessors return u64).
 */
static u64 hist_field_execname(struct hist_field *hist_field,
			       struct tracing_map_elt *elt,
			       struct trace_buffer *buffer,
			       struct ring_buffer_event *rbe,
			       void *event)
{
	struct hist_elt_data *elt_data;

	if (WARN_ON_ONCE(!elt))
		return (u64)(unsigned long)no_comm;

	elt_data = elt->private_data;

	if (WARN_ON_ONCE(!elt_data->comm))
		return (u64)(unsigned long)no_comm;

	return (u64)(unsigned long)(elt_data->comm);
}
/*
 * Return the in-record address of a stack-trace field's data, cast to
 * u64.
 */
static u64 hist_field_stack(struct hist_field *hist_field,
			    struct tracing_map_elt *elt,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *rbe,
			    void *event)
{
	/*
	 * NOTE(review): reads a dynamic-array descriptor — low 16 bits
	 * taken as the data offset within the record; upper bits
	 * (presumably the length) are ignored here.
	 */
	u32 str_item = *(u32 *)(event + hist_field->field->offset);
	int str_loc = str_item & 0xffff;
	char *addr = (char *)(event + str_loc);

	return (u64)(unsigned long)addr;
}
/*
 * Central dispatcher mapping a hist_field's fn_num to its accessor.
 * Returns the field's value for this event/element; 0 for an unknown
 * fn_num.
 */
static u64 hist_fn_call(struct hist_field *hist_field,
			struct tracing_map_elt *elt,
			struct trace_buffer *buffer,
			struct ring_buffer_event *rbe,
			void *event)
{
	switch (hist_field->fn_num) {
	case HIST_FIELD_FN_VAR_REF:
		return hist_field_var_ref(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_COUNTER:
		return hist_field_counter(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_CONST:
		return hist_field_const(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_LOG2:
		return hist_field_log2(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_BUCKET:
		return hist_field_bucket(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_TIMESTAMP:
		return hist_field_timestamp(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_CPU:
		return hist_field_cpu(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_STRING:
		return hist_field_string(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DYNSTRING:
		return hist_field_dynstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_RELDYNSTRING:
		return hist_field_reldynstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_PSTRING:
		return hist_field_pstring(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S64:
		return hist_field_s64(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U64:
		return hist_field_u64(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S32:
		return hist_field_s32(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U32:
		return hist_field_u32(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S16:
		return hist_field_s16(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U16:
		return hist_field_u16(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_S8:
		return hist_field_s8(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_U8:
		return hist_field_u8(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_UMINUS:
		return hist_field_unary_minus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_MINUS:
		return hist_field_minus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_PLUS:
		return hist_field_plus(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV:
		return hist_field_div(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_MULT:
		return hist_field_mult(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_POWER2:
		return div_by_power_of_two(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_NOT_POWER2:
		return div_by_not_power_of_two(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_DIV_MULT_SHIFT:
		return div_by_mult_and_shift(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_EXECNAME:
		return hist_field_execname(hist_field, elt, buffer, rbe, event);
	case HIST_FIELD_FN_STACK:
		return hist_field_stack(hist_field, elt, buffer, rbe, event);
	default:
		return 0;
	}
}
/* Convert a var that points to common_pid.execname to a string */
static void update_var_execname(struct hist_field *hist_field)
{
	/* The var now yields the comm string, not the numeric pid. */
	hist_field->flags = HIST_FIELD_FL_STRING | HIST_FIELD_FL_VAR |
		HIST_FIELD_FL_EXECNAME;
	hist_field->size = MAX_FILTER_STR_VAL;
	hist_field->is_signed = 0;

	/* The old type string may be kstrdup_const()'d; this one is static. */
	kfree_const(hist_field->type);
	hist_field->type = "char[]";

	hist_field->fn_num = HIST_FIELD_FN_EXECNAME;
}
/*
 * Create a named variable field ("name=expr") at val_idx.  Rejects
 * duplicate names, enforces the variable-count limit, and marks
 * execname/string/stacktrace variables for string storage.
 *
 * Returns 0 or a negative error.
 */
static int create_var_field(struct hist_trigger_data *hist_data,
			    unsigned int val_idx,
			    struct trace_event_file *file,
			    char *var_name, char *expr_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	unsigned long flags = 0;
	int ret;

	if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
		return -EINVAL;

	/* Redefinition is only allowed when removing the trigger. */
	if (find_var(hist_data, file, var_name) && !hist_data->remove) {
		hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name));
		return -EINVAL;
	}

	flags |= HIST_FIELD_FL_VAR;
	hist_data->n_vars++;
	if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
		return -EINVAL;

	ret = __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);

	/* $common_pid.execname vars are rewritten to produce a string. */
	if (!ret && hist_data->fields[val_idx]->flags & HIST_FIELD_FL_EXECNAME)
		update_var_execname(hist_data->fields[val_idx]);

	/* String/stacktrace vars need a per-element string storage slot. */
	if (!ret && hist_data->fields[val_idx]->flags &
	    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE))
		hist_data->fields[val_idx]->var_str_idx = hist_data->n_var_str++;

	return ret;
}
/*
 * Parse the trigger's "vals=" string and create a val field for each
 * comma-separated entry.  Slot 0 always holds the implicit hitcount;
 * an explicit "hitcount" entry is only counted, not duplicated.
 */
static int create_val_fields(struct hist_trigger_data *hist_data,
			     struct trace_event_file *file)
{
	unsigned int i, j = 1, n_hitcount = 0;
	char *fields_str, *field_str;
	int ret;

	ret = create_hitcount_val(hist_data);
	if (ret)
		goto out;

	fields_str = hist_data->attrs->vals_str;
	if (!fields_str)
		goto out;

	for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
	     j < TRACING_MAP_VALS_MAX; i++) {
		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		/* First explicit "hitcount" duplicates slot 0 - skip it */
		if (strcmp(field_str, "hitcount") == 0) {
			if (!n_hitcount++)
				continue;
		}

		ret = create_val_field(hist_data, j++, file, field_str);
		if (ret)
			goto out;
	}

	/* Leftover text means too many vals (a trailing "hitcount" is ok) */
	if (fields_str && (strcmp(fields_str, "hitcount") != 0))
		ret = -EINVAL;
 out:
	/* There is only raw hitcount but nohitcount suppresses it. */
	if (j == 1 && hist_data->attrs->no_hitcount) {
		hist_err(hist_data->event_file->tr, HIST_ERR_NEED_NOHC_VAL, 0);
		ret = -ENOENT;
	}

	return ret;
}
/*
 * Parse one key specification and install it at field slot @key_idx /
 * byte offset @key_offset in the compound key.  "stacktrace" creates
 * a kernel-stack key; anything else is parsed as an expression.
 * Returns the (u64-aligned) key size on success, negative error
 * otherwise.
 */
static int create_key_field(struct hist_trigger_data *hist_data,
			    unsigned int key_idx,
			    unsigned int key_offset,
			    struct trace_event_file *file,
			    char *field_str)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct hist_field *hist_field = NULL;
	unsigned long flags = 0;
	unsigned int key_size;
	int ret = 0, n_subexprs = 0;

	if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
		return -EINVAL;

	flags |= HIST_FIELD_FL_KEY;

	if (strcmp(field_str, "stacktrace") == 0) {
		flags |= HIST_FIELD_FL_STACKTRACE;
		key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
		hist_field = create_hist_field(hist_data, NULL, flags, NULL);
	} else {
		hist_field = parse_expr(hist_data, file, field_str, flags,
					NULL, &n_subexprs);
		if (IS_ERR(hist_field)) {
			ret = PTR_ERR(hist_field);
			goto out;
		}

		/* Keys may not reference variables of other histograms */
		if (field_has_hist_vars(hist_field, 0)) {
			hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str));
			destroy_hist_field(hist_field, 0);
			ret = -EINVAL;
			goto out;
		}

		key_size = hist_field->size;
	}

	hist_data->fields[key_idx] = hist_field;

	/* Keys are packed into the compound key at u64 granularity */
	key_size = ALIGN(key_size, sizeof(u64));
	hist_data->fields[key_idx]->size = key_size;
	hist_data->fields[key_idx]->offset = key_offset;

	hist_data->key_size += key_size;
	if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	hist_data->n_keys++;
	hist_data->n_fields++;

	if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
		return -EINVAL;

	ret = key_size;
 out:
	return ret;
}
  3778. static int create_key_fields(struct hist_trigger_data *hist_data,
  3779. struct trace_event_file *file)
  3780. {
  3781. unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
  3782. char *fields_str, *field_str;
  3783. int ret = -EINVAL;
  3784. fields_str = hist_data->attrs->keys_str;
  3785. if (!fields_str)
  3786. goto out;
  3787. for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
  3788. field_str = strsep(&fields_str, ",");
  3789. if (!field_str)
  3790. break;
  3791. ret = create_key_field(hist_data, i, key_offset,
  3792. file, field_str);
  3793. if (ret < 0)
  3794. goto out;
  3795. key_offset += ret;
  3796. }
  3797. if (fields_str) {
  3798. ret = -EINVAL;
  3799. goto out;
  3800. }
  3801. ret = 0;
  3802. out:
  3803. return ret;
  3804. }
  3805. static int create_var_fields(struct hist_trigger_data *hist_data,
  3806. struct trace_event_file *file)
  3807. {
  3808. unsigned int i, j = hist_data->n_vals;
  3809. int ret = 0;
  3810. unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
  3811. for (i = 0; i < n_vars; i++) {
  3812. char *var_name = hist_data->attrs->var_defs.name[i];
  3813. char *expr = hist_data->attrs->var_defs.expr[i];
  3814. ret = create_var_field(hist_data, j++, file, var_name, expr);
  3815. if (ret)
  3816. goto out;
  3817. }
  3818. out:
  3819. return ret;
  3820. }
  3821. static void free_var_defs(struct hist_trigger_data *hist_data)
  3822. {
  3823. unsigned int i;
  3824. for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
  3825. kfree(hist_data->attrs->var_defs.name[i]);
  3826. kfree(hist_data->attrs->var_defs.expr[i]);
  3827. }
  3828. hist_data->attrs->var_defs.n_vars = 0;
  3829. }
/*
 * Parse the trigger's "name=expr" assignment strings into the
 * var_defs name/expr arrays as kstrdup'd copies.  On any error,
 * every copy made so far is released via free_var_defs().
 */
static int parse_var_defs(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *s, *str, *var_name, *field_str;
	unsigned int i, j, n_vars = 0;
	int ret = 0;

	for (i = 0; i < hist_data->attrs->n_assignments; i++) {
		str = hist_data->attrs->assignment_str[i];

		/* One assignment string may hold several comma-separated defs */
		for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
			field_str = strsep(&str, ",");
			if (!field_str)
				break;

			var_name = strsep(&field_str, "=");
			if (!var_name || !field_str) {
				hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT,
					 errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			if (n_vars == TRACING_MAP_VARS_MAX) {
				hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name));
				ret = -EINVAL;
				goto free;
			}

			s = kstrdup(var_name, GFP_KERNEL);
			if (!s) {
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.name[n_vars] = s;

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				/* Undo the partial entry so free_var_defs() stays sane */
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				hist_data->attrs->var_defs.name[n_vars] = NULL;
				ret = -ENOMEM;
				goto free;
			}
			hist_data->attrs->var_defs.expr[n_vars++] = s;

			/* Keep n_vars current so free_var_defs() frees all copies */
			hist_data->attrs->var_defs.n_vars = n_vars;
		}
	}

	return ret;
 free:
	free_var_defs(hist_data);

	return ret;
}
  3876. static int create_hist_fields(struct hist_trigger_data *hist_data,
  3877. struct trace_event_file *file)
  3878. {
  3879. int ret;
  3880. ret = parse_var_defs(hist_data);
  3881. if (ret)
  3882. return ret;
  3883. ret = create_val_fields(hist_data, file);
  3884. if (ret)
  3885. goto out;
  3886. ret = create_var_fields(hist_data, file);
  3887. if (ret)
  3888. goto out;
  3889. ret = create_key_fields(hist_data, file);
  3890. out:
  3891. free_var_defs(hist_data);
  3892. return ret;
  3893. }
  3894. static int is_descending(struct trace_array *tr, const char *str)
  3895. {
  3896. if (!str)
  3897. return 0;
  3898. if (strcmp(str, "descending") == 0)
  3899. return 1;
  3900. if (strcmp(str, "ascending") == 0)
  3901. return 0;
  3902. hist_err(tr, HIST_ERR_INVALID_SORT_MODIFIER, errpos((char *)str));
  3903. return -EINVAL;
  3904. }
/*
 * Parse the trigger's "sort=" string into hist_data->sort_keys[].
 * Each entry is "<field>[.descending|.ascending]".  Sort-key field
 * indices are tracing-map indices: hitcount is 0 and variables are
 * skipped, so they differ from hist_data->fields[] indices.
 */
static int create_sort_keys(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	char *fields_str = hist_data->attrs->sort_key_str;
	struct tracing_map_sort_key *sort_key;
	int descending, ret = 0;
	unsigned int i, j, k;

	hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */

	if (!fields_str)
		goto out;

	for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
		struct hist_field *hist_field;
		char *field_str, *field_name;
		const char *test_name;

		sort_key = &hist_data->sort_keys[i];

		field_str = strsep(&fields_str, ",");
		if (!field_str)
			break;

		if (!*field_str) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* Entries remain but all sort-key slots are already used */
		if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
			hist_err(tr, HIST_ERR_TOO_MANY_SORT_FIELDS, errpos("sort="));
			ret = -EINVAL;
			break;
		}

		/* Split off the optional ".ascending"/".descending" modifier */
		field_name = strsep(&field_str, ".");
		if (!field_name || !*field_name) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_EMPTY_SORT_FIELD, errpos("sort="));
			break;
		}

		/* hitcount is tracing-map field 0; field_idx stays 0 */
		if (strcmp(field_name, "hitcount") == 0) {
			descending = is_descending(tr, field_str);
			if (descending < 0) {
				ret = descending;
				break;
			}
			sort_key->descending = descending;
			continue;
		}

		/*
		 * Walk the remaining fields; k counts only non-var
		 * fields, yielding the tracing-map index to sort on.
		 */
		for (j = 1, k = 1; j < hist_data->n_fields; j++) {
			unsigned int idx;

			hist_field = hist_data->fields[j];
			if (hist_field->flags & HIST_FIELD_FL_VAR)
				continue;

			idx = k++;

			test_name = hist_field_name(hist_field, 0);

			if (strcmp(field_name, test_name) == 0) {
				sort_key->field_idx = idx;
				descending = is_descending(tr, field_str);
				if (descending < 0) {
					ret = descending;
					goto out;
				}
				sort_key->descending = descending;
				break;
			}
		}
		/* Ran off the end of the field list: no such field */
		if (j == hist_data->n_fields) {
			ret = -EINVAL;
			hist_err(tr, HIST_ERR_INVALID_SORT_FIELD, errpos(field_name));
			break;
		}
	}

	hist_data->n_sort_keys = i;
 out:
	return ret;
}
  3976. static void destroy_actions(struct hist_trigger_data *hist_data)
  3977. {
  3978. unsigned int i;
  3979. for (i = 0; i < hist_data->n_actions; i++) {
  3980. struct action_data *data = hist_data->actions[i];
  3981. if (data->handler == HANDLER_ONMATCH)
  3982. onmatch_destroy(data);
  3983. else if (data->handler == HANDLER_ONMAX ||
  3984. data->handler == HANDLER_ONCHANGE)
  3985. track_data_destroy(hist_data, data);
  3986. else
  3987. kfree(data);
  3988. }
  3989. }
/*
 * Parse the trigger's action strings ("onmatch(...)", "onmax(...)",
 * "onchange(...)") into action_data structs stored in
 * hist_data->actions[].  Binding to targets happens later in
 * create_actions().
 */
static int parse_actions(struct hist_trigger_data *hist_data)
{
	struct trace_array *tr = hist_data->event_file->tr;
	struct action_data *data;
	unsigned int i;
	int ret = 0;
	char *str;
	int len;

	for (i = 0; i < hist_data->attrs->n_actions; i++) {
		enum handler_id hid = 0;
		char *action_str;

		str = hist_data->attrs->action_str[i];

		/* len is the matched prefix length, 0 if no handler matched */
		if ((len = str_has_prefix(str, "onmatch(")))
			hid = HANDLER_ONMATCH;
		else if ((len = str_has_prefix(str, "onmax(")))
			hid = HANDLER_ONMAX;
		else if ((len = str_has_prefix(str, "onchange(")))
			hid = HANDLER_ONCHANGE;

		action_str = str + len;

		switch (hid) {
		case HANDLER_ONMATCH:
			data = onmatch_parse(tr, action_str);
			break;
		case HANDLER_ONMAX:
		case HANDLER_ONCHANGE:
			data = track_data_parse(hist_data, action_str, hid);
			break;
		default:
			/* Unrecognized handler keyword */
			data = ERR_PTR(-EINVAL);
			break;
		}

		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		hist_data->actions[hist_data->n_actions++] = data;
	}

	return ret;
}
  4029. static int create_actions(struct hist_trigger_data *hist_data)
  4030. {
  4031. struct action_data *data;
  4032. unsigned int i;
  4033. int ret = 0;
  4034. for (i = 0; i < hist_data->attrs->n_actions; i++) {
  4035. data = hist_data->actions[i];
  4036. if (data->handler == HANDLER_ONMATCH) {
  4037. ret = onmatch_create(hist_data, data);
  4038. if (ret)
  4039. break;
  4040. } else if (data->handler == HANDLER_ONMAX ||
  4041. data->handler == HANDLER_ONCHANGE) {
  4042. ret = track_data_create(hist_data, data);
  4043. if (ret)
  4044. break;
  4045. } else {
  4046. ret = -EINVAL;
  4047. break;
  4048. }
  4049. }
  4050. return ret;
  4051. }
  4052. static void print_actions(struct seq_file *m,
  4053. struct hist_trigger_data *hist_data,
  4054. struct tracing_map_elt *elt)
  4055. {
  4056. unsigned int i;
  4057. for (i = 0; i < hist_data->n_actions; i++) {
  4058. struct action_data *data = hist_data->actions[i];
  4059. if (data->action == ACTION_SNAPSHOT)
  4060. continue;
  4061. if (data->handler == HANDLER_ONMAX ||
  4062. data->handler == HANDLER_ONCHANGE)
  4063. track_data_print(m, hist_data, elt, data);
  4064. }
  4065. }
  4066. static void print_action_spec(struct seq_file *m,
  4067. struct hist_trigger_data *hist_data,
  4068. struct action_data *data)
  4069. {
  4070. unsigned int i;
  4071. if (data->action == ACTION_SAVE) {
  4072. for (i = 0; i < hist_data->n_save_vars; i++) {
  4073. seq_printf(m, "%s", hist_data->save_vars[i]->var->var.name);
  4074. if (i < hist_data->n_save_vars - 1)
  4075. seq_puts(m, ",");
  4076. }
  4077. } else if (data->action == ACTION_TRACE) {
  4078. if (data->use_trace_keyword)
  4079. seq_printf(m, "%s", data->synth_event_name);
  4080. for (i = 0; i < data->n_params; i++) {
  4081. if (i || data->use_trace_keyword)
  4082. seq_puts(m, ",");
  4083. seq_printf(m, "%s", data->params[i]);
  4084. }
  4085. }
  4086. }
  4087. static void print_track_data_spec(struct seq_file *m,
  4088. struct hist_trigger_data *hist_data,
  4089. struct action_data *data)
  4090. {
  4091. if (data->handler == HANDLER_ONMAX)
  4092. seq_puts(m, ":onmax(");
  4093. else if (data->handler == HANDLER_ONCHANGE)
  4094. seq_puts(m, ":onchange(");
  4095. seq_printf(m, "%s", data->track_data.var_str);
  4096. seq_printf(m, ").%s(", data->action_name);
  4097. print_action_spec(m, hist_data, data);
  4098. seq_puts(m, ")");
  4099. }
  4100. static void print_onmatch_spec(struct seq_file *m,
  4101. struct hist_trigger_data *hist_data,
  4102. struct action_data *data)
  4103. {
  4104. seq_printf(m, ":onmatch(%s.%s).", data->match_data.event_system,
  4105. data->match_data.event);
  4106. seq_printf(m, "%s(", data->action_name);
  4107. print_action_spec(m, hist_data, data);
  4108. seq_puts(m, ")");
  4109. }
/*
 * Return true if two triggers' action lists are equivalent: same
 * count, and pairwise-identical handler, action, params, action (or
 * synth event) name, and handler-specific match/track data.  Used to
 * decide whether an existing trigger matches a new one.
 */
static bool actions_match(struct hist_trigger_data *hist_data,
			  struct hist_trigger_data *hist_data_test)
{
	unsigned int i, j;

	if (hist_data->n_actions != hist_data_test->n_actions)
		return false;

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *data = hist_data->actions[i];
		struct action_data *data_test = hist_data_test->actions[i];
		char *action_name, *action_name_test;

		if (data->handler != data_test->handler)
			return false;
		if (data->action != data_test->action)
			return false;

		if (data->n_params != data_test->n_params)
			return false;

		for (j = 0; j < data->n_params; j++) {
			if (strcmp(data->params[j], data_test->params[j]) != 0)
				return false;
		}

		/* trace-keyword actions are named by their synth event */
		if (data->use_trace_keyword)
			action_name = data->synth_event_name;
		else
			action_name = data->action_name;

		if (data_test->use_trace_keyword)
			action_name_test = data_test->synth_event_name;
		else
			action_name_test = data_test->action_name;

		if (strcmp(action_name, action_name_test) != 0)
			return false;

		if (data->handler == HANDLER_ONMATCH) {
			if (strcmp(data->match_data.event_system,
				   data_test->match_data.event_system) != 0)
				return false;
			if (strcmp(data->match_data.event,
				   data_test->match_data.event) != 0)
				return false;
		} else if (data->handler == HANDLER_ONMAX ||
			   data->handler == HANDLER_ONCHANGE) {
			if (strcmp(data->track_data.var_str,
				   data_test->track_data.var_str) != 0)
				return false;
		}
	}

	return true;
}
  4156. static void print_actions_spec(struct seq_file *m,
  4157. struct hist_trigger_data *hist_data)
  4158. {
  4159. unsigned int i;
  4160. for (i = 0; i < hist_data->n_actions; i++) {
  4161. struct action_data *data = hist_data->actions[i];
  4162. if (data->handler == HANDLER_ONMATCH)
  4163. print_onmatch_spec(m, hist_data, data);
  4164. else if (data->handler == HANDLER_ONMAX ||
  4165. data->handler == HANDLER_ONCHANGE)
  4166. print_track_data_spec(m, hist_data, data);
  4167. }
  4168. }
/*
 * Free the records of histograms auto-created for field variables
 * (each holds the trigger command string used to create it).
 */
static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
{
	unsigned int i;

	for (i = 0; i < hist_data->n_field_var_hists; i++) {
		kfree(hist_data->field_var_hists[i]->cmd);
		kfree(hist_data->field_var_hists[i]);
	}
}
/*
 * Tear down a hist_trigger_data and everything it owns: attrs,
 * fields, the tracing map, actions, field vars and their
 * auto-created histograms.  Tolerates a partially-constructed
 * hist_data; a NULL pointer is a no-op.
 */
static void destroy_hist_data(struct hist_trigger_data *hist_data)
{
	if (!hist_data)
		return;

	destroy_hist_trigger_attrs(hist_data->attrs);
	destroy_hist_fields(hist_data);
	tracing_map_destroy(hist_data->map);

	destroy_actions(hist_data);
	destroy_field_vars(hist_data);
	destroy_field_var_hists(hist_data);

	kfree(hist_data);
}
/*
 * Register the hist fields with the tracing map: keys get a compare
 * function matched to their type, non-var values become sum fields,
 * and variables get map var slots.  Returns 0 or a negative error
 * from the map layer.
 */
static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
{
	struct tracing_map *map = hist_data->map;
	struct ftrace_event_field *field;
	struct hist_field *hist_field;
	int i, idx = 0;

	for_each_hist_field(i, hist_data) {
		hist_field = hist_data->fields[i];

		if (hist_field->flags & HIST_FIELD_FL_KEY) {
			tracing_map_cmp_fn_t cmp_fn;

			field = hist_field->field;

			if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
				cmp_fn = tracing_map_cmp_none;
			else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
				/* Keys with no event field (e.g. cpu) compare numerically */
				cmp_fn = tracing_map_cmp_num(hist_field->size,
							     hist_field->is_signed);
			else if (is_string_field(field))
				cmp_fn = tracing_map_cmp_string;
			else
				cmp_fn = tracing_map_cmp_num(field->size,
							     field->is_signed);
			idx = tracing_map_add_key_field(map,
							hist_field->offset,
							cmp_fn);
		} else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
			idx = tracing_map_add_sum_field(map);

		if (idx < 0)
			return idx;

		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			idx = tracing_map_add_var(map);
			if (idx < 0)
				return idx;
			hist_field->var.idx = idx;
			hist_field->var.hist_data = hist_data;
		}
	}

	return 0;
}
/*
 * Allocate and fully construct a hist_trigger_data from parsed
 * @attrs: parse actions, create fields and sort keys, then create
 * and populate the backing tracing map.  On success the returned
 * hist_data owns @attrs; on failure an ERR_PTR is returned and
 * @attrs remains the caller's to free (hist_data->attrs is cleared
 * before teardown for that reason).
 */
static struct hist_trigger_data *
create_hist_data(unsigned int map_bits,
		 struct hist_trigger_attrs *attrs,
		 struct trace_event_file *file,
		 bool remove)
{
	const struct tracing_map_ops *map_ops = NULL;
	struct hist_trigger_data *hist_data;
	int ret = 0;

	hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
	if (!hist_data)
		return ERR_PTR(-ENOMEM);

	hist_data->attrs = attrs;
	hist_data->remove = remove;
	hist_data->event_file = file;

	ret = parse_actions(hist_data);
	if (ret)
		goto free;

	ret = create_hist_fields(hist_data, file);
	if (ret)
		goto free;

	ret = create_sort_keys(hist_data);
	if (ret)
		goto free;

	map_ops = &hist_trigger_elt_data_ops;

	hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
					    map_ops, hist_data);
	if (IS_ERR(hist_data->map)) {
		ret = PTR_ERR(hist_data->map);
		hist_data->map = NULL;
		goto free;
	}

	ret = create_tracing_map_fields(hist_data);
	if (ret)
		goto free;
 out:
	return hist_data;
 free:
	/* Caller still owns attrs on failure - don't free them here */
	hist_data->attrs = NULL;

	destroy_hist_data(hist_data);

	hist_data = ERR_PTR(ret);

	goto out;
}
/*
 * Update one map element for the current event: recompute each val
 * field and fold it into the element's sums, assign variable values
 * (copying string/stacktrace contents into per-element storage so
 * they outlive the event record), then update key-attached vars and
 * field vars.
 */
static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
				    struct tracing_map_elt *elt,
				    struct trace_buffer *buffer, void *rec,
				    struct ring_buffer_event *rbe,
				    u64 *var_ref_vals)
{
	struct hist_elt_data *elt_data;
	struct hist_field *hist_field;
	unsigned int i, var_idx;
	u64 hist_val;

	elt_data = elt->private_data;
	elt_data->var_ref_vals = var_ref_vals;

	for_each_hist_val_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			var_idx = hist_field->var.idx;

			if (hist_field->flags &
			    (HIST_FIELD_FL_STRING | HIST_FIELD_FL_STACKTRACE)) {
				unsigned int str_start, var_str_idx, idx;
				char *str, *val_str;
				unsigned int size;

				/* Var strings are stored after field/save var strings */
				str_start = hist_data->n_field_var_str +
					hist_data->n_save_var_str;
				var_str_idx = hist_field->var_str_idx;
				idx = str_start + var_str_idx;

				str = elt_data->field_var_str[idx];
				val_str = (char *)(uintptr_t)hist_val;

				if (hist_field->flags & HIST_FIELD_FL_STRING) {
					size = min(hist_field->size, STR_VAR_LEN_MAX);
					strscpy(str, val_str, size);
				} else {
					/*
					 * Stacktrace layout: first word is the
					 * entry count, then the entries, with a
					 * zero terminator when not full.
					 */
					char *stack_start = str + sizeof(unsigned long);
					int e;

					e = stack_trace_save((void *)stack_start,
							     HIST_STACKTRACE_DEPTH,
							     HIST_STACKTRACE_SKIP);
					if (e < HIST_STACKTRACE_DEPTH - 1)
						((unsigned long *)stack_start)[e] = 0;
					*((unsigned long *)str) = e;
				}
				/* The var's value becomes the pointer to the copy */
				hist_val = (u64)(uintptr_t)str;
			}
			tracing_map_set_var(elt, var_idx, hist_val);
			continue;
		}
		tracing_map_update_sum(elt, i, hist_val);
	}

	for_each_hist_key_field(i, hist_data) {
		hist_field = hist_data->fields[i];
		if (hist_field->flags & HIST_FIELD_FL_VAR) {
			hist_val = hist_fn_call(hist_field, elt, buffer, rbe, rec);
			var_idx = hist_field->var.idx;
			tracing_map_set_var(elt, var_idx, hist_val);
		}
	}

	update_field_vars(hist_data, elt, buffer, rbe, rec);
}
/*
 * Copy one key field's value into the compound key at the field's
 * offset.  String keys copy only the actual string length - the
 * compound key buffer was pre-zeroed by the caller, so the bounded
 * strncpy result stays NUL-terminated.  Other keys are fixed-size
 * memcpys.
 */
static inline void add_to_key(char *compound_key, void *key,
			      struct hist_field *key_field, void *rec)
{
	size_t size = key_field->size;

	if (key_field->flags & HIST_FIELD_FL_STRING) {
		struct ftrace_event_field *field;

		field = key_field->field;
		if (field->filter_type == FILTER_DYN_STRING ||
		    field->filter_type == FILTER_RDYN_STRING)
			/* Dynamic strings: length is the high half of the meta word */
			size = *(u32 *)(rec + field->offset) >> 16;
		else if (field->filter_type == FILTER_STATIC_STRING)
			size = field->size;

		/* ensure NULL-termination */
		if (size > key_field->size - 1)
			size = key_field->size - 1;

		strncpy(compound_key + key_field->offset, (char *)key, size);
	} else
		memcpy(compound_key + key_field->offset, key, size);
}
  4347. static void
  4348. hist_trigger_actions(struct hist_trigger_data *hist_data,
  4349. struct tracing_map_elt *elt,
  4350. struct trace_buffer *buffer, void *rec,
  4351. struct ring_buffer_event *rbe, void *key,
  4352. u64 *var_ref_vals)
  4353. {
  4354. struct action_data *data;
  4355. unsigned int i;
  4356. for (i = 0; i < hist_data->n_actions; i++) {
  4357. data = hist_data->actions[i];
  4358. data->fn(hist_data, elt, buffer, rec, rbe, key, data, var_ref_vals);
  4359. }
  4360. }
/*
 * The hist_pad structure is used to save information to create
 * a histogram from the histogram trigger. It's too big to store
 * on the stack, so when the histogram trigger is initialized
 * a percpu array of 4 hist_pad structures is allocated.
 * This will cover every context from normal, softirq, irq and NMI
 * in the very unlikely event that a trigger happens at each of
 * these contexts and interrupts a currently active trigger.
 */
struct hist_pad {
	unsigned long	entries[HIST_STACKTRACE_DEPTH];	    /* stacktrace scratch */
	u64		var_ref_vals[TRACING_MAP_VARS_MAX]; /* resolved var refs */
	char		compound_key[HIST_KEY_SIZE_MAX];    /* multi-key scratch */
};

/* Per-cpu pad array, shared by all hist triggers; NULL until first user */
static struct hist_pad __percpu *hist_pads;
/* Per-cpu count of pads in use - the current nesting depth */
static DEFINE_PER_CPU(int, hist_pad_cnt);
/* Number of hist triggers holding a reference on hist_pads */
static refcount_t hist_pad_ref;

/* One hist_pad for every context (normal, softirq, irq, NMI) */
#define MAX_HIST_CNT 4
/*
 * Take a reference on the shared per-cpu hist_pad array, allocating
 * it on first use.  Must be called under event_mutex, which
 * serializes the refcount's zero<->nonzero transitions.
 */
static int alloc_hist_pad(void)
{
	lockdep_assert_held(&event_mutex);

	if (refcount_read(&hist_pad_ref)) {
		refcount_inc(&hist_pad_ref);
		return 0;
	}

	hist_pads = __alloc_percpu(sizeof(struct hist_pad) * MAX_HIST_CNT,
				   __alignof__(struct hist_pad));
	if (!hist_pads)
		return -ENOMEM;

	refcount_set(&hist_pad_ref, 1);
	return 0;
}
/* Drop a reference on hist_pads; the last user frees the array */
static void free_hist_pad(void)
{
	lockdep_assert_held(&event_mutex);

	if (!refcount_dec_and_test(&hist_pad_ref))
		return;

	free_percpu(hist_pads);
	hist_pads = NULL;
}
/*
 * Claim this CPU's next free hist_pad with preemption disabled.
 * Returns NULL if all MAX_HIST_CNT nesting levels are already in use
 * (or the pads were never allocated); the caller must then skip the
 * event.  Pair every non-NULL return with put_hist_pad().
 */
static struct hist_pad *get_hist_pad(void)
{
	struct hist_pad *hist_pad;
	int cnt;

	if (WARN_ON_ONCE(!hist_pads))
		return NULL;

	preempt_disable();

	hist_pad = per_cpu_ptr(hist_pads, smp_processor_id());

	if (this_cpu_read(hist_pad_cnt) == MAX_HIST_CNT) {
		preempt_enable();
		return NULL;
	}

	/* The nesting counter doubles as the index of the pad handed out */
	cnt = this_cpu_inc_return(hist_pad_cnt) - 1;

	return &hist_pad[cnt];
}
/* Release the pad taken by get_hist_pad() and re-enable preemption */
static void put_hist_pad(void)
{
	this_cpu_dec(hist_pad_cnt);
	preempt_enable();
}
/*
 * Hot path, called for every event the trigger is attached to:
 * build the (possibly compound) key, resolve variable references,
 * insert/update the map element, then run any actions.
 */
static void event_hist_trigger(struct event_trigger_data *data,
			       struct trace_buffer *buffer, void *rec,
			       struct ring_buffer_event *rbe)
{
	struct hist_trigger_data *hist_data = data->private_data;
	bool use_compound_key = (hist_data->n_keys > 1);
	struct tracing_map_elt *elt = NULL;
	struct hist_field *key_field;
	struct hist_pad *hist_pad;
	u64 field_contents;
	void *key = NULL;
	unsigned int i;

	if (unlikely(!rbe))
		return;

	/* All nesting levels busy: drop the event rather than corrupt a pad */
	hist_pad = get_hist_pad();
	if (!hist_pad)
		return;

	memset(hist_pad->compound_key, 0, hist_data->key_size);

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			unsigned long *entries = hist_pad->entries;

			memset(entries, 0, HIST_STACKTRACE_SIZE);
			if (key_field->field) {
				/* Field-stored stacktrace: first word is the count */
				unsigned long *stack, n_entries;

				field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
				stack = (unsigned long *)(long)field_contents;
				n_entries = *stack;
				memcpy(entries, ++stack, n_entries * sizeof(unsigned long));
			} else {
				stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
						 HIST_STACKTRACE_SKIP);
			}
			key = entries;
		} else {
			field_contents = hist_fn_call(key_field, elt, buffer, rbe, rec);
			if (key_field->flags & HIST_FIELD_FL_STRING) {
				key = (void *)(unsigned long)field_contents;
				/* String keys always go through the compound key */
				use_compound_key = true;
			} else
				key = (void *)&field_contents;
		}

		if (use_compound_key)
			add_to_key(hist_pad->compound_key, key, key_field, rec);
	}

	if (use_compound_key)
		key = hist_pad->compound_key;

	/* All non-tracking var refs must resolve before touching the map */
	if (hist_data->n_var_refs &&
	    !resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, false))
		goto out;

	elt = tracing_map_insert(hist_data->map, key);
	if (!elt)
		goto out;

	hist_trigger_elt_update(hist_data, elt, buffer, rec, rbe, hist_pad->var_ref_vals);

	if (resolve_var_refs(hist_data, key, hist_pad->var_ref_vals, true)) {
		hist_trigger_actions(hist_data, elt, buffer, rec, rbe,
				     key, hist_pad->var_ref_vals);
	}

	hist_poll_wakeup();

 out:
	put_hist_pad();
}
  4484. static void hist_trigger_stacktrace_print(struct seq_file *m,
  4485. unsigned long *stacktrace_entries,
  4486. unsigned int max_entries)
  4487. {
  4488. unsigned int spaces = 8;
  4489. unsigned int i;
  4490. for (i = 0; i < max_entries; i++) {
  4491. if (!stacktrace_entries[i])
  4492. return;
  4493. seq_printf(m, "%*c", 1 + spaces, ' ');
  4494. seq_printf(m, "%pS\n", (void*)stacktrace_entries[i]);
  4495. }
  4496. }
/*
 * Print one map entry's key as "{ field: value, ... }", formatting
 * each key field per its modifier flags (hex, sym, execname,
 * syscall, stacktrace, log2, buckets, string, or plain decimal).
 */
static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt)
{
	struct hist_field *key_field;
	bool multiline = false;
	const char *field_name;
	unsigned int i;
	u64 uval;

	seq_puts(m, "{ ");

	for_each_hist_key_field(i, hist_data) {
		key_field = hist_data->fields[i];

		/* Separator before every key field except the first */
		if (i > hist_data->n_vals)
			seq_puts(m, ", ");

		field_name = hist_field_name(key_field, 0);

		if (key_field->flags & HIST_FIELD_FL_HEX) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %llx", field_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: [%llx] %-45ps", field_name,
				   uval, (void *)(uintptr_t)uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: [%llx] %-55pS", field_name,
				   uval, (void *)(uintptr_t)uval);
		} else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
			/* The comm is cached in the element's private data */
			struct hist_elt_data *elt_data = elt->private_data;
			char *comm;

			if (WARN_ON_ONCE(!elt_data))
				return;

			comm = elt_data->comm;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %-16s[%10llu]", field_name,
				   comm, uval);
		} else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
			const char *syscall_name;

			uval = *(u64 *)(key + key_field->offset);
			syscall_name = get_syscall_name(uval);
			if (!syscall_name)
				syscall_name = "unknown_syscall";

			seq_printf(m, "%s: %-30s[%3llu]", field_name,
				   syscall_name, uval);
		} else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
			if (key_field->field)
				seq_printf(m, "%s.stacktrace", key_field->field->name);
			else
				seq_puts(m, "common_stacktrace:\n");
			hist_trigger_stacktrace_print(m,
						      key + key_field->offset,
						      HIST_STACKTRACE_DEPTH);
			multiline = true;
		} else if (key_field->flags & HIST_FIELD_FL_LOG2) {
			seq_printf(m, "%s: ~ 2^%-2llu", field_name,
				   *(u64 *)(key + key_field->offset));
		} else if (key_field->flags & HIST_FIELD_FL_BUCKET) {
			unsigned long buckets = key_field->buckets;

			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: ~ %llu-%llu", field_name,
				   uval, uval + buckets -1);
		} else if (key_field->flags & HIST_FIELD_FL_STRING) {
			seq_printf(m, "%s: %-50s", field_name,
				   (char *)(key + key_field->offset));
		} else {
			uval = *(u64 *)(key + key_field->offset);
			seq_printf(m, "%s: %10llu", field_name, uval);
		}
	}

	if (!multiline)
		seq_puts(m, " ");

	seq_puts(m, "}");
}
  4570. /* Get the 100 times of the percentage of @val in @total */
  4571. static inline unsigned int __get_percentage(u64 val, u64 total)
  4572. {
  4573. if (!total)
  4574. goto div0;
  4575. if (val < (U64_MAX / 10000))
  4576. return (unsigned int)div64_ul(val * 10000, total);
  4577. total = div64_u64(total, 10000);
  4578. if (!total)
  4579. goto div0;
  4580. return (unsigned int)div64_ul(val, total);
  4581. div0:
  4582. return val ? UINT_MAX : 0;
  4583. }
  4584. #define BAR_CHAR '#'
  4585. static inline const char *__fill_bar_str(char *buf, int size, u64 val, u64 max)
  4586. {
  4587. unsigned int len = __get_percentage(val, max);
  4588. int i;
  4589. if (len == UINT_MAX) {
  4590. snprintf(buf, size, "[ERROR]");
  4591. return buf;
  4592. }
  4593. len = len * size / 10000;
  4594. for (i = 0; i < len && i < size; i++)
  4595. buf[i] = BAR_CHAR;
  4596. while (i < size)
  4597. buf[i++] = ' ';
  4598. buf[size] = '\0';
  4599. return buf;
  4600. }
/* Per-value-field statistics needed by .percent and .graph display. */
struct hist_val_stat {
	u64 max;	/* largest single value seen across all entries */
	u64 total;	/* sum of the value across all entries */
};
  4605. static void hist_trigger_print_val(struct seq_file *m, unsigned int idx,
  4606. const char *field_name, unsigned long flags,
  4607. struct hist_val_stat *stats,
  4608. struct tracing_map_elt *elt)
  4609. {
  4610. u64 val = tracing_map_read_sum(elt, idx);
  4611. unsigned int pc;
  4612. char bar[21];
  4613. if (flags & HIST_FIELD_FL_PERCENT) {
  4614. pc = __get_percentage(val, stats[idx].total);
  4615. if (pc == UINT_MAX)
  4616. seq_printf(m, " %s (%%):[ERROR]", field_name);
  4617. else
  4618. seq_printf(m, " %s (%%): %3u.%02u", field_name,
  4619. pc / 100, pc % 100);
  4620. } else if (flags & HIST_FIELD_FL_GRAPH) {
  4621. seq_printf(m, " %s: %20s", field_name,
  4622. __fill_bar_str(bar, 20, val, stats[idx].max));
  4623. } else if (flags & HIST_FIELD_FL_HEX) {
  4624. seq_printf(m, " %s: %10llx", field_name, val);
  4625. } else {
  4626. seq_printf(m, " %s: %10llu", field_name, val);
  4627. }
  4628. }
  4629. static void hist_trigger_entry_print(struct seq_file *m,
  4630. struct hist_trigger_data *hist_data,
  4631. struct hist_val_stat *stats,
  4632. void *key,
  4633. struct tracing_map_elt *elt)
  4634. {
  4635. const char *field_name;
  4636. unsigned int i = HITCOUNT_IDX;
  4637. unsigned long flags;
  4638. hist_trigger_print_key(m, hist_data, key, elt);
  4639. /* At first, show the raw hitcount if !nohitcount */
  4640. if (!hist_data->attrs->no_hitcount)
  4641. hist_trigger_print_val(m, i, "hitcount", 0, stats, elt);
  4642. for (i = 1; i < hist_data->n_vals; i++) {
  4643. field_name = hist_field_name(hist_data->fields[i], 0);
  4644. flags = hist_data->fields[i]->flags;
  4645. if (flags & HIST_FIELD_FL_VAR || flags & HIST_FIELD_FL_EXPR)
  4646. continue;
  4647. seq_puts(m, " ");
  4648. hist_trigger_print_val(m, i, field_name, flags, stats, elt);
  4649. }
  4650. print_actions(m, hist_data, elt);
  4651. seq_puts(m, "\n");
  4652. }
  4653. static int print_entries(struct seq_file *m,
  4654. struct hist_trigger_data *hist_data)
  4655. {
  4656. struct tracing_map_sort_entry **sort_entries = NULL;
  4657. struct tracing_map *map = hist_data->map;
  4658. int i, j, n_entries;
  4659. struct hist_val_stat *stats = NULL;
  4660. u64 val;
  4661. n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
  4662. hist_data->n_sort_keys,
  4663. &sort_entries);
  4664. if (n_entries < 0)
  4665. return n_entries;
  4666. /* Calculate the max and the total for each field if needed. */
  4667. for (j = 0; j < hist_data->n_vals; j++) {
  4668. if (!(hist_data->fields[j]->flags &
  4669. (HIST_FIELD_FL_PERCENT | HIST_FIELD_FL_GRAPH)))
  4670. continue;
  4671. if (!stats) {
  4672. stats = kcalloc(hist_data->n_vals, sizeof(*stats),
  4673. GFP_KERNEL);
  4674. if (!stats) {
  4675. n_entries = -ENOMEM;
  4676. goto out;
  4677. }
  4678. }
  4679. for (i = 0; i < n_entries; i++) {
  4680. val = tracing_map_read_sum(sort_entries[i]->elt, j);
  4681. stats[j].total += val;
  4682. if (stats[j].max < val)
  4683. stats[j].max = val;
  4684. }
  4685. }
  4686. for (i = 0; i < n_entries; i++)
  4687. hist_trigger_entry_print(m, hist_data, stats,
  4688. sort_entries[i]->key,
  4689. sort_entries[i]->elt);
  4690. kfree(stats);
  4691. out:
  4692. tracing_map_destroy_sort_entries(sort_entries, n_entries);
  4693. return n_entries;
  4694. }
  4695. static void hist_trigger_show(struct seq_file *m,
  4696. struct event_trigger_data *data, int n)
  4697. {
  4698. struct hist_trigger_data *hist_data;
  4699. int n_entries;
  4700. if (n > 0)
  4701. seq_puts(m, "\n\n");
  4702. seq_puts(m, "# event histogram\n#\n# trigger info: ");
  4703. data->ops->print(m, data);
  4704. seq_puts(m, "#\n\n");
  4705. hist_data = data->private_data;
  4706. n_entries = print_entries(m, hist_data);
  4707. if (n_entries < 0)
  4708. n_entries = 0;
  4709. track_data_snapshot_print(m, hist_data);
  4710. seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
  4711. (u64)atomic64_read(&hist_data->map->hits),
  4712. n_entries, (u64)atomic64_read(&hist_data->map->drops));
  4713. }
/* Per-open-file state for a "hist" file; used by read and poll. */
struct hist_file_data {
	struct file *file;	/* the open hist file itself */
	u64 last_read;		/* total hit count at the time of the last read */
	u64 last_act;		/* hit count last observed by poll()/show() */
};
  4719. static u64 get_hist_hit_count(struct trace_event_file *event_file)
  4720. {
  4721. struct hist_trigger_data *hist_data;
  4722. struct event_trigger_data *data;
  4723. u64 ret = 0;
  4724. list_for_each_entry(data, &event_file->triggers, list) {
  4725. if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  4726. hist_data = data->private_data;
  4727. ret += atomic64_read(&hist_data->map->hits);
  4728. }
  4729. }
  4730. return ret;
  4731. }
  4732. static int hist_show(struct seq_file *m, void *v)
  4733. {
  4734. struct hist_file_data *hist_file = m->private;
  4735. struct event_trigger_data *data;
  4736. struct trace_event_file *event_file;
  4737. int n = 0;
  4738. guard(mutex)(&event_mutex);
  4739. event_file = event_file_file(hist_file->file);
  4740. if (unlikely(!event_file))
  4741. return -ENODEV;
  4742. list_for_each_entry(data, &event_file->triggers, list) {
  4743. if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
  4744. hist_trigger_show(m, data, n++);
  4745. }
  4746. hist_file->last_read = get_hist_hit_count(event_file);
  4747. /*
  4748. * Update last_act too so that poll()/POLLPRI can wait for the next
  4749. * event after any syscall on hist file.
  4750. */
  4751. hist_file->last_act = hist_file->last_read;
  4752. return 0;
  4753. }
  4754. static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
  4755. {
  4756. struct trace_event_file *event_file;
  4757. struct seq_file *m = file->private_data;
  4758. struct hist_file_data *hist_file = m->private;
  4759. __poll_t ret = 0;
  4760. u64 cnt;
  4761. guard(mutex)(&event_mutex);
  4762. event_file = event_file_data(file);
  4763. if (!event_file)
  4764. return EPOLLERR;
  4765. hist_poll_wait(file, wait);
  4766. cnt = get_hist_hit_count(event_file);
  4767. if (hist_file->last_read != cnt)
  4768. ret |= EPOLLIN | EPOLLRDNORM;
  4769. if (hist_file->last_act != cnt) {
  4770. hist_file->last_act = cnt;
  4771. ret |= EPOLLPRI;
  4772. }
  4773. return ret;
  4774. }
/*
 * release() for the "hist" file: free the per-open hist_file_data
 * allocated in event_hist_open(), then do the normal seq teardown.
 */
static int event_hist_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct hist_file_data *hist_file = m->private;

	kfree(hist_file);
	return tracing_single_release_file_tr(inode, file);
}
/*
 * open() for the "hist" file.  Allocates the per-open hist_file_data
 * used by poll() to detect new hits and hooks up single_open().
 * Pairs with event_hist_release().
 */
static int event_hist_open(struct inode *inode, struct file *file)
{
	struct trace_event_file *event_file;
	struct hist_file_data *hist_file;
	int ret;

	ret = tracing_open_file_tr(inode, file);
	if (ret)
		return ret;

	/* guard() releases event_mutex on every return path below. */
	guard(mutex)(&event_mutex);

	event_file = event_file_data(file);
	if (!event_file) {
		ret = -ENODEV;
		goto err;
	}

	hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
	if (!hist_file) {
		ret = -ENOMEM;
		goto err;
	}

	hist_file->file = file;
	/* Start poll() tracking from the current hit count. */
	hist_file->last_act = get_hist_hit_count(event_file);

	/* Clear private_data to avoid warning in single_open() */
	file->private_data = NULL;
	ret = single_open(file, hist_show, hist_file);
	if (ret) {
		kfree(hist_file);
		goto err;
	}

	return 0;
err:
	/* Undo tracing_open_file_tr() on any failure. */
	tracing_release_file_tr(inode, file);
	return ret;
}
/* File operations for the per-event "hist" file. */
const struct file_operations event_hist_fops = {
	.open = event_hist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = event_hist_release,
	.poll = event_hist_poll,
};
  4822. #ifdef CONFIG_HIST_TRIGGERS_DEBUG
/*
 * Decode a hist_field's flags for the "hist_debug" file: first the
 * field's primary role (key, hitcount, var, var-ref, or plain value),
 * then any alias/const modifier.
 */
static void hist_field_debug_show_flags(struct seq_file *m,
					unsigned long flags)
{
	seq_puts(m, " flags:\n");

	if (flags & HIST_FIELD_FL_KEY)
		seq_puts(m, " HIST_FIELD_FL_KEY\n");
	else if (flags & HIST_FIELD_FL_HITCOUNT)
		seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n");
	else if (flags & HIST_FIELD_FL_VAR)
		seq_puts(m, " HIST_FIELD_FL_VAR\n");
	else if (flags & HIST_FIELD_FL_VAR_REF)
		seq_puts(m, " HIST_FIELD_FL_VAR_REF\n");
	else
		seq_puts(m, " VAL: normal u64 value\n");

	if (flags & HIST_FIELD_FL_ALIAS)
		seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
	else if (flags & HIST_FIELD_FL_CONST)
		seq_puts(m, " HIST_FIELD_FL_CONST\n");
}
/*
 * Dump one hist_field's debug info.  @flags, when nonzero, names the
 * role the caller expects this field to have; a mismatch is reported
 * to the seq file and -EINVAL returned.
 */
static int hist_field_debug_show(struct seq_file *m,
				 struct hist_field *field, unsigned long flags)
{
	if ((field->flags & flags) != flags) {
		seq_printf(m, "ERROR: bad flags - %lx\n", flags);
		return -EINVAL;
	}

	hist_field_debug_show_flags(m, field->flags);

	if (field->field)
		seq_printf(m, " ftrace_event_field name: %s\n",
			   field->field->name);

	if (field->flags & HIST_FIELD_FL_VAR) {
		seq_printf(m, " var.name: %s\n", field->var.name);
		seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
			   field->var.idx);
	}

	if (field->flags & HIST_FIELD_FL_CONST)
		seq_printf(m, " constant: %llu\n", field->constant);

	if (field->flags & HIST_FIELD_FL_ALIAS)
		seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
			   field->var_ref_idx);

	/* A variable reference also records where the variable lives. */
	if (field->flags & HIST_FIELD_FL_VAR_REF) {
		seq_printf(m, " name: %s\n", field->name);
		seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
			   field->var.idx);
		seq_printf(m, " var.hist_data: %p\n", field->var.hist_data);
		seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
			   field->var_ref_idx);
		if (field->system)
			seq_printf(m, " system: %s\n", field->system);
		if (field->event_name)
			seq_printf(m, " event_name: %s\n", field->event_name);
	}

	seq_printf(m, " type: %s\n", field->type);
	seq_printf(m, " size: %u\n", field->size);
	seq_printf(m, " is_signed: %u\n", field->is_signed);

	return 0;
}
/*
 * Dump one field_var (or save() var when @save_vars) entry: the
 * backing variable and the value field it captures.  Returns -EINVAL
 * if the value has no associated ftrace event field.
 */
static int field_var_debug_show(struct seq_file *m,
				struct field_var *field_var, unsigned int i,
				bool save_vars)
{
	const char *vars_name = save_vars ? "save_vars" : "field_vars";
	struct hist_field *field;
	int ret = 0;

	seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i);

	field = field_var->var;

	seq_printf(m, "\n %s[%d].var:\n", vars_name, i);

	hist_field_debug_show_flags(m, field->flags);

	seq_printf(m, " var.name: %s\n", field->var.name);
	seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
		   field->var.idx);

	field = field_var->val;

	seq_printf(m, "\n %s[%d].val:\n", vars_name, i);

	if (field->field)
		seq_printf(m, " ftrace_event_field name: %s\n",
			   field->field->name);
	else {
		/* A field var's value must map to a real event field. */
		ret = -EINVAL;
		goto out;
	}

	seq_printf(m, " type: %s\n", field->type);
	seq_printf(m, " size: %u\n", field->size);
	seq_printf(m, " is_signed: %u\n", field->is_signed);
out:
	return ret;
}
/*
 * Dump the variables tracked by one action: for onmax()/onchange(),
 * the tracked variable reference and the tracking variable; for
 * onmatch(), the matched event's system and name.
 */
static int hist_action_debug_show(struct seq_file *m,
				  struct action_data *data, int i)
{
	int ret = 0;

	if (data->handler == HANDLER_ONMAX ||
	    data->handler == HANDLER_ONCHANGE) {
		seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i);
		ret = hist_field_debug_show(m, data->track_data.var_ref,
					    HIST_FIELD_FL_VAR_REF);
		if (ret)
			goto out;

		seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i);
		ret = hist_field_debug_show(m, data->track_data.track_var,
					    HIST_FIELD_FL_VAR);
		if (ret)
			goto out;
	}

	if (data->handler == HANDLER_ONMATCH) {
		seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n",
			   i, data->match_data.event_system);
		seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n",
			   i, data->match_data.event);
	}
out:
	return ret;
}
/*
 * Dump debug info for every action, then for every save() variable.
 * Stops and returns the error of the first entry that fails.
 */
static int hist_actions_debug_show(struct seq_file *m,
				   struct hist_trigger_data *hist_data)
{
	int i, ret = 0;

	if (hist_data->n_actions)
		seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");

	for (i = 0; i < hist_data->n_actions; i++) {
		struct action_data *action = hist_data->actions[i];

		ret = hist_action_debug_show(m, action, i);
		if (ret)
			goto out;
	}

	if (hist_data->n_save_vars)
		seq_puts(m, "\n save action variables (save() params):\n");

	for (i = 0; i < hist_data->n_save_vars; i++) {
		ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
		if (ret)
			goto out;
	}
out:
	return ret;
}
/*
 * Dump everything about one hist trigger to the "hist_debug" file:
 * counts, value fields, key fields, variable references, field
 * variables and actions.  Output stops at the first field that fails
 * its flag sanity check.
 */
static void hist_trigger_debug_show(struct seq_file *m,
				    struct event_trigger_data *data, int n)
{
	struct hist_trigger_data *hist_data;
	int i, ret;

	if (n > 0)
		seq_puts(m, "\n\n");

	seq_puts(m, "# event histogram\n#\n# trigger info: ");
	data->ops->print(m, data);
	seq_puts(m, "#\n\n");

	hist_data = data->private_data;

	seq_printf(m, "hist_data: %p\n\n", hist_data);
	seq_printf(m, " n_vals: %u\n", hist_data->n_vals);
	seq_printf(m, " n_keys: %u\n", hist_data->n_keys);
	seq_printf(m, " n_fields: %u\n", hist_data->n_fields);

	seq_puts(m, "\n val fields:\n\n");

	/* fields[0] is always the hitcount. */
	seq_puts(m, " hist_data->fields[0]:\n");
	ret = hist_field_debug_show(m, hist_data->fields[0],
				    HIST_FIELD_FL_HITCOUNT);
	if (ret)
		return;

	for (i = 1; i < hist_data->n_vals; i++) {
		seq_printf(m, "\n hist_data->fields[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->fields[i], 0);
		if (ret)
			return;
	}

	seq_puts(m, "\n key fields:\n");

	/* Key fields follow the value fields in fields[]. */
	for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
		seq_printf(m, "\n hist_data->fields[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->fields[i],
					    HIST_FIELD_FL_KEY);
		if (ret)
			return;
	}

	if (hist_data->n_var_refs)
		seq_puts(m, "\n variable reference fields:\n");

	for (i = 0; i < hist_data->n_var_refs; i++) {
		seq_printf(m, "\n hist_data->var_refs[%d]:\n", i);
		ret = hist_field_debug_show(m, hist_data->var_refs[i],
					    HIST_FIELD_FL_VAR_REF);
		if (ret)
			return;
	}

	if (hist_data->n_field_vars)
		seq_puts(m, "\n field variables:\n");

	for (i = 0; i < hist_data->n_field_vars; i++) {
		ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
		if (ret)
			return;
	}

	ret = hist_actions_debug_show(m, hist_data);
	if (ret)
		return;
}
  5012. static int hist_debug_show(struct seq_file *m, void *v)
  5013. {
  5014. struct event_trigger_data *data;
  5015. struct trace_event_file *event_file;
  5016. int n = 0;
  5017. guard(mutex)(&event_mutex);
  5018. event_file = event_file_file(m->private);
  5019. if (unlikely(!event_file))
  5020. return -ENODEV;
  5021. list_for_each_entry(data, &event_file->triggers, list) {
  5022. if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
  5023. hist_trigger_debug_show(m, data, n++);
  5024. }
  5025. return 0;
  5026. }
  5027. static int event_hist_debug_open(struct inode *inode, struct file *file)
  5028. {
  5029. int ret;
  5030. ret = tracing_open_file_tr(inode, file);
  5031. if (ret)
  5032. return ret;
  5033. /* Clear private_data to avoid warning in single_open() */
  5034. file->private_data = NULL;
  5035. ret = single_open(file, hist_debug_show, file);
  5036. if (ret)
  5037. tracing_release_file_tr(inode, file);
  5038. return ret;
  5039. }
/* File operations for the per-event "hist_debug" file. */
const struct file_operations event_hist_debug_fops = {
	.open = event_hist_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_single_release_file_tr,
};
  5046. #endif
  5047. static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
  5048. {
  5049. const char *field_name = hist_field_name(hist_field, 0);
  5050. if (hist_field->var.name)
  5051. seq_printf(m, "%s=", hist_field->var.name);
  5052. if (hist_field->flags & HIST_FIELD_FL_CPU)
  5053. seq_puts(m, "common_cpu");
  5054. else if (hist_field->flags & HIST_FIELD_FL_CONST)
  5055. seq_printf(m, "%llu", hist_field->constant);
  5056. else if (field_name) {
  5057. if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
  5058. hist_field->flags & HIST_FIELD_FL_ALIAS)
  5059. seq_putc(m, '$');
  5060. seq_printf(m, "%s", field_name);
  5061. } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
  5062. seq_puts(m, "common_timestamp");
  5063. if (hist_field->flags) {
  5064. if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
  5065. !(hist_field->flags & HIST_FIELD_FL_EXPR) &&
  5066. !(hist_field->flags & HIST_FIELD_FL_STACKTRACE)) {
  5067. const char *flags = get_hist_field_flags(hist_field);
  5068. if (flags)
  5069. seq_printf(m, ".%s", flags);
  5070. }
  5071. }
  5072. if (hist_field->buckets)
  5073. seq_printf(m, "=%ld", hist_field->buckets);
  5074. }
/*
 * Print the full trigger definition string for a hist trigger:
 * keys=, vals=, any variable assignments, sort=, size=, optional
 * clock=/nohitcount, action specs, filter, and the paused/active
 * state.  This is what appears as "# trigger info:" in the hist file.
 */
static int event_hist_trigger_print(struct seq_file *m,
				    struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct hist_field *field;
	bool have_var = false;
	bool show_val = false;
	unsigned int i;

	seq_puts(m, HIST_PREFIX);

	if (data->name)
		seq_printf(m, "%s:", data->name);

	seq_puts(m, "keys=");

	for_each_hist_key_field(i, hist_data) {
		field = hist_data->fields[i];

		/* Keys after the first get a comma separator. */
		if (i > hist_data->n_vals)
			seq_puts(m, ",");

		if (field->flags & HIST_FIELD_FL_STACKTRACE) {
			if (field->field)
				seq_printf(m, "%s.stacktrace", field->field->name);
			else
				seq_puts(m, "common_stacktrace");
		} else
			hist_field_print(m, field);
	}

	seq_puts(m, ":vals=");

	for_each_hist_val_field(i, hist_data) {
		field = hist_data->fields[i];
		/* Variables are printed separately below. */
		if (field->flags & HIST_FIELD_FL_VAR) {
			have_var = true;
			continue;
		}
		if (i == HITCOUNT_IDX) {
			if (hist_data->attrs->no_hitcount)
				continue;
			seq_puts(m, "hitcount");
		} else {
			if (show_val)
				seq_puts(m, ",");
			hist_field_print(m, field);
		}
		show_val = true;
	}

	if (have_var) {
		unsigned int n = 0;

		seq_puts(m, ":");

		for_each_hist_val_field(i, hist_data) {
			field = hist_data->fields[i];

			if (field->flags & HIST_FIELD_FL_VAR) {
				if (n++)
					seq_puts(m, ",");
				hist_field_print(m, field);
			}
		}
	}

	seq_puts(m, ":sort=");

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		struct tracing_map_sort_key *sort_key;
		unsigned int idx, first_key_idx;

		/* skip VAR vals */
		first_key_idx = hist_data->n_vals - hist_data->n_vars;

		sort_key = &hist_data->sort_keys[i];
		idx = sort_key->field_idx;

		if (WARN_ON(idx >= HIST_FIELDS_MAX))
			return -EINVAL;

		if (i > 0)
			seq_puts(m, ",");

		if (idx == HITCOUNT_IDX)
			seq_puts(m, "hitcount");
		else {
			/* Key indices must be shifted past the VAR vals. */
			if (idx >= first_key_idx)
				idx += hist_data->n_vars;
			hist_field_print(m, hist_data->fields[idx]);
		}

		if (sort_key->descending)
			seq_puts(m, ".descending");
	}

	seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
	if (hist_data->enable_timestamps)
		seq_printf(m, ":clock=%s", hist_data->attrs->clock);
	if (hist_data->attrs->no_hitcount)
		seq_puts(m, ":nohitcount");

	print_actions_spec(m, hist_data);

	if (data->filter_str)
		seq_printf(m, " if %s", data->filter_str);

	if (data->paused)
		seq_puts(m, " [paused]");
	else
		seq_puts(m, " [active]");

	seq_putc(m, '\n');

	return 0;
}
  5166. static int event_hist_trigger_init(struct event_trigger_data *data)
  5167. {
  5168. struct hist_trigger_data *hist_data = data->private_data;
  5169. if (alloc_hist_pad() < 0)
  5170. return -ENOMEM;
  5171. if (!data->ref && hist_data->attrs->name)
  5172. save_named_trigger(hist_data->attrs->name, data);
  5173. data->ref++;
  5174. return 0;
  5175. }
  5176. static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
  5177. {
  5178. struct trace_event_file *file;
  5179. unsigned int i;
  5180. char *cmd;
  5181. int ret;
  5182. for (i = 0; i < hist_data->n_field_var_hists; i++) {
  5183. file = hist_data->field_var_hists[i]->hist_data->event_file;
  5184. cmd = hist_data->field_var_hists[i]->cmd;
  5185. ret = event_hist_trigger_parse(&trigger_hist_cmd, file,
  5186. "!hist", "hist", cmd);
  5187. WARN_ON_ONCE(ret < 0);
  5188. }
  5189. }
/*
 * free op for a hist trigger: drop one reference; on the last one,
 * delete the name registration, free the trigger data, remove this
 * hist's variables, unregister helper field-var hists and destroy the
 * hist data.  The hist pad is released unconditionally, balancing
 * the alloc in event_hist_trigger_init().
 */
static void event_hist_trigger_free(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		if (data->name)
			del_named_trigger(data);

		trigger_data_free(data);

		remove_hist_vars(hist_data);

		unregister_field_var_hists(hist_data);

		destroy_hist_data(hist_data);
	}
	free_hist_pad();
}
/* Trigger ops for an ordinary (data-owning) hist trigger. */
static struct event_trigger_ops event_hist_trigger_ops = {
	.trigger = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_init,
	.free = event_hist_trigger_free,
};
/*
 * init op for a trigger sharing a named trigger's data: take a ref on
 * this instance, register it under the shared name, then init the
 * underlying named trigger, which owns the actual hist data.
 */
static int event_hist_trigger_named_init(struct event_trigger_data *data)
{
	data->ref++;

	save_named_trigger(data->named_data->name, data);

	return event_hist_trigger_init(data->named_data);
}
/*
 * free op for a trigger sharing a named trigger's data: release the
 * underlying named trigger first, then drop this instance's ref and,
 * on the last one, remove its name registration and free it.
 */
static void event_hist_trigger_named_free(struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	event_hist_trigger_free(data->named_data);

	data->ref--;
	if (!data->ref) {
		del_named_trigger(data);
		trigger_data_free(data);
	}
}
/* Trigger ops for an instance that shares a named trigger's data. */
static struct event_trigger_ops event_hist_trigger_named_ops = {
	.trigger = event_hist_trigger,
	.print = event_hist_trigger_print,
	.init = event_hist_trigger_named_init,
	.free = event_hist_trigger_named_free,
};
/*
 * All hist triggers start with the ordinary ops; a trigger that ends
 * up sharing a named trigger's data is switched to
 * event_hist_trigger_named_ops during hist_register_trigger().
 */
static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
							    char *param)
{
	return &event_hist_trigger_ops;
}
/*
 * Reset (zero) a hist trigger's map.  A named trigger is paused
 * around the clear, and tracepoint_synchronize_unregister() is called
 * first so concurrent trigger invocations have drained before the
 * map is cleared.
 */
static void hist_clear(struct event_trigger_data *data)
{
	struct hist_trigger_data *hist_data = data->private_data;

	if (data->name)
		pause_named_trigger(data);

	tracepoint_synchronize_unregister();

	tracing_map_clear(hist_data->map);

	if (data->name)
		unpause_named_trigger(data);
}
  5250. static bool compatible_field(struct ftrace_event_field *field,
  5251. struct ftrace_event_field *test_field)
  5252. {
  5253. if (field == test_field)
  5254. return true;
  5255. if (field == NULL || test_field == NULL)
  5256. return false;
  5257. if (strcmp(field->name, test_field->name) != 0)
  5258. return false;
  5259. if (strcmp(field->type, test_field->type) != 0)
  5260. return false;
  5261. if (field->size != test_field->size)
  5262. return false;
  5263. if (field->is_signed != test_field->is_signed)
  5264. return false;
  5265. return true;
  5266. }
/*
 * Return true if @data and @data_test describe the same histogram:
 * same named-trigger association, same field/sort-key counts, same
 * fields (flags, event field, offset, size, signedness, variable
 * name), same sort keys, compatible filter strings, and matching
 * actions.  @ignore_filter skips the filter comparison (used when
 * matching against an existing named trigger).
 */
static bool hist_trigger_match(struct event_trigger_data *data,
			       struct event_trigger_data *data_test,
			       struct event_trigger_data *named_data,
			       bool ignore_filter)
{
	struct tracing_map_sort_key *sort_key, *sort_key_test;
	struct hist_trigger_data *hist_data, *hist_data_test;
	struct hist_field *key_field, *key_field_test;
	unsigned int i;

	/* A named trigger matches only itself or its shared instances. */
	if (named_data && (named_data != data_test) &&
	    (named_data != data_test->named_data))
		return false;

	if (!named_data && is_named_trigger(data_test))
		return false;

	hist_data = data->private_data;
	hist_data_test = data_test->private_data;

	if (hist_data->n_vals != hist_data_test->n_vals ||
	    hist_data->n_fields != hist_data_test->n_fields ||
	    hist_data->n_sort_keys != hist_data_test->n_sort_keys)
		return false;

	/* Either both have a filter or neither does. */
	if (!ignore_filter) {
		if ((data->filter_str && !data_test->filter_str) ||
		    (!data->filter_str && data_test->filter_str))
			return false;
	}

	for_each_hist_field(i, hist_data) {
		key_field = hist_data->fields[i];
		key_field_test = hist_data_test->fields[i];

		if (key_field->flags != key_field_test->flags)
			return false;
		if (!compatible_field(key_field->field, key_field_test->field))
			return false;
		if (key_field->offset != key_field_test->offset)
			return false;
		if (key_field->size != key_field_test->size)
			return false;
		if (key_field->is_signed != key_field_test->is_signed)
			return false;
		if (!!key_field->var.name != !!key_field_test->var.name)
			return false;
		if (key_field->var.name &&
		    strcmp(key_field->var.name, key_field_test->var.name) != 0)
			return false;
	}

	for (i = 0; i < hist_data->n_sort_keys; i++) {
		sort_key = &hist_data->sort_keys[i];
		sort_key_test = &hist_data_test->sort_keys[i];

		if (sort_key->field_idx != sort_key_test->field_idx ||
		    sort_key->descending != sort_key_test->descending)
			return false;
	}

	if (!ignore_filter && data->filter_str &&
	    (strcmp(data->filter_str, data_test->filter_str) != 0))
		return false;

	if (!actions_match(hist_data, hist_data_test))
		return false;

	return true;
}
  5325. static bool existing_hist_update_only(char *glob,
  5326. struct event_trigger_data *data,
  5327. struct trace_event_file *file)
  5328. {
  5329. struct hist_trigger_data *hist_data = data->private_data;
  5330. struct event_trigger_data *test, *named_data = NULL;
  5331. bool updated = false;
  5332. if (!hist_data->attrs->pause && !hist_data->attrs->cont &&
  5333. !hist_data->attrs->clear)
  5334. goto out;
  5335. if (hist_data->attrs->name) {
  5336. named_data = find_named_trigger(hist_data->attrs->name);
  5337. if (named_data) {
  5338. if (!hist_trigger_match(data, named_data, named_data,
  5339. true))
  5340. goto out;
  5341. }
  5342. }
  5343. if (hist_data->attrs->name && !named_data)
  5344. goto out;
  5345. list_for_each_entry(test, &file->triggers, list) {
  5346. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  5347. if (!hist_trigger_match(data, test, named_data, false))
  5348. continue;
  5349. if (hist_data->attrs->pause)
  5350. test->paused = true;
  5351. else if (hist_data->attrs->cont)
  5352. test->paused = false;
  5353. else if (hist_data->attrs->clear)
  5354. hist_clear(test);
  5355. updated = true;
  5356. goto out;
  5357. }
  5358. }
  5359. out:
  5360. return updated;
  5361. }
/*
 * Register a new hist trigger on @file.  A named trigger must match
 * the existing trigger of that name exactly and then shares its hist
 * data; an exact duplicate of an unnamed existing trigger is rejected
 * with -EEXIST, and cont/clear against a nonexistent trigger with
 * -ENOENT.  Sets the trace clock and enables filter buffering when
 * the trigger uses timestamps.
 */
static int hist_register_trigger(char *glob,
				 struct event_trigger_data *data,
				 struct trace_event_file *file)
{
	struct hist_trigger_data *hist_data = data->private_data;
	struct event_trigger_data *test, *named_data = NULL;
	struct trace_array *tr = file->tr;
	int ret = 0;

	if (hist_data->attrs->name) {
		named_data = find_named_trigger(hist_data->attrs->name);
		if (named_data) {
			/* Same name must mean the same histogram. */
			if (!hist_trigger_match(data, named_data, named_data,
						true)) {
				hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* First instance of a named trigger: skip the duplicate scan. */
	if (hist_data->attrs->name && !named_data)
		goto new;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (hist_trigger_match(data, test, named_data, false)) {
				hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0);
				ret = -EEXIST;
				goto out;
			}
		}
	}
new:
	/* cont/clear only make sense on an already existing trigger. */
	if (hist_data->attrs->cont || hist_data->attrs->clear) {
		hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0);
		ret = -ENOENT;
		goto out;
	}

	if (hist_data->attrs->pause)
		data->paused = true;

	/* Share the named trigger's hist data and switch to named ops. */
	if (named_data) {
		data->private_data = named_data->private_data;
		set_named_trigger_data(data, named_data);
		data->ops = &event_hist_trigger_named_ops;
	}

	if (data->ops->init) {
		ret = data->ops->init(data);
		if (ret < 0)
			goto out;
	}

	if (hist_data->enable_timestamps) {
		char *clock = hist_data->attrs->clock;

		ret = tracing_set_clock(file->tr, hist_data->attrs->clock);
		if (ret) {
			hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock));
			goto out;
		}

		tracing_set_filter_buffering(file->tr, true);
	}

	/* The new instance uses named_data's data; its own is unneeded. */
	if (named_data)
		destroy_hist_data(hist_data);
out:
	return ret;
}
  5425. static int hist_trigger_enable(struct event_trigger_data *data,
  5426. struct trace_event_file *file)
  5427. {
  5428. int ret = 0;
  5429. list_add_tail_rcu(&data->list, &file->triggers);
  5430. update_cond_flag(file);
  5431. if (trace_event_trigger_enable_disable(file, 1) < 0) {
  5432. list_del_rcu(&data->list);
  5433. update_cond_flag(file);
  5434. ret--;
  5435. }
  5436. return ret;
  5437. }
  5438. static bool have_hist_trigger_match(struct event_trigger_data *data,
  5439. struct trace_event_file *file)
  5440. {
  5441. struct hist_trigger_data *hist_data = data->private_data;
  5442. struct event_trigger_data *test, *named_data = NULL;
  5443. bool match = false;
  5444. lockdep_assert_held(&event_mutex);
  5445. if (hist_data->attrs->name)
  5446. named_data = find_named_trigger(hist_data->attrs->name);
  5447. list_for_each_entry(test, &file->triggers, list) {
  5448. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  5449. if (hist_trigger_match(data, test, named_data, false)) {
  5450. match = true;
  5451. break;
  5452. }
  5453. }
  5454. }
  5455. return match;
  5456. }
  5457. static bool hist_trigger_check_refs(struct event_trigger_data *data,
  5458. struct trace_event_file *file)
  5459. {
  5460. struct hist_trigger_data *hist_data = data->private_data;
  5461. struct event_trigger_data *test, *named_data = NULL;
  5462. lockdep_assert_held(&event_mutex);
  5463. if (hist_data->attrs->name)
  5464. named_data = find_named_trigger(hist_data->attrs->name);
  5465. list_for_each_entry(test, &file->triggers, list) {
  5466. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  5467. if (!hist_trigger_match(data, test, named_data, false))
  5468. continue;
  5469. hist_data = test->private_data;
  5470. if (check_var_refs(hist_data))
  5471. return true;
  5472. break;
  5473. }
  5474. }
  5475. return false;
  5476. }
/*
 * Unregister the hist trigger on @file that matches @data: unlink it
 * from the trigger list, disable the event trigger, run its free op,
 * and turn off filter buffering when appropriate for a trigger that
 * had enabled timestamps.
 */
static void hist_unregister_trigger(char *glob,
				    struct event_trigger_data *data,
				    struct trace_event_file *file)
{
	struct event_trigger_data *test = NULL, *iter, *named_data = NULL;
	struct hist_trigger_data *hist_data = data->private_data;

	lockdep_assert_held(&event_mutex);

	if (hist_data->attrs->name)
		named_data = find_named_trigger(hist_data->attrs->name);

	list_for_each_entry(iter, &file->triggers, list) {
		if (iter->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (!hist_trigger_match(data, iter, named_data, false))
				continue;
			/* Remember the match so it can be freed below. */
			test = iter;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (test && test->ops->free)
		test->ops->free(test);

	if (hist_data->enable_timestamps) {
		if (!hist_data->remove || test)
			tracing_set_filter_buffering(file->tr, false);
	}
}
  5504. static bool hist_file_check_refs(struct trace_event_file *file)
  5505. {
  5506. struct hist_trigger_data *hist_data;
  5507. struct event_trigger_data *test;
  5508. lockdep_assert_held(&event_mutex);
  5509. list_for_each_entry(test, &file->triggers, list) {
  5510. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  5511. hist_data = test->private_data;
  5512. if (check_var_refs(hist_data))
  5513. return true;
  5514. }
  5515. }
  5516. return false;
  5517. }
/*
 * hist_unreg_all - remove all hist triggers from @file
 * @file: event file to strip of hist triggers
 *
 * Bails out without touching anything if any hist trigger on the file
 * still has variable references.  Otherwise unlinks, disables and frees
 * every hist trigger, dropping the associated synthetic-event reference
 * and filter buffering as it goes.  Caller holds event_mutex.
 */
static void hist_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;
	struct hist_trigger_data *hist_data;
	struct synth_event *se;
	const char *se_name;

	lockdep_assert_held(&event_mutex);

	if (hist_file_check_refs(file))
		return;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			hist_data = test->private_data;
			list_del_rcu(&test->list);
			trace_event_trigger_enable_disable(file, 0);
			/* Release the ref taken on any same-named synth event. */
			se_name = trace_event_name(file->event_call);
			se = find_synth_event(se_name);
			if (se)
				se->ref--;
			update_cond_flag(file);
			if (hist_data->enable_timestamps)
				tracing_set_filter_buffering(file->tr, false);
			if (test->ops->free)
				test->ops->free(test);
		}
	}
}
/*
 * event_hist_trigger_parse - parse and register/unregister a 'hist' trigger
 * @cmd_ops: the 'hist' event_command operations
 * @file: the event file the trigger is being attached to (or removed from)
 * @glob: the raw command string; '!'-prefixed for removal
 * @cmd: the trigger command name
 * @param_and_filter: trigger parameters, optionally followed by 'if <filter>'
 *
 * Parses the hist trigger specification, builds the hist data and trigger
 * data, and either removes a matching existing trigger or registers and
 * enables the new one.  Caller holds event_mutex.
 *
 * Returns 0 on success, else a negative errno.
 */
static int event_hist_trigger_parse(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *glob, char *cmd,
				    char *param_and_filter)
{
	unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
	struct event_trigger_data *trigger_data;
	struct hist_trigger_attrs *attrs;
	struct hist_trigger_data *hist_data;
	char *param, *filter, *p, *start;
	struct synth_event *se;
	const char *se_name;
	bool remove;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	if (WARN_ON(!glob))
		return -EINVAL;

	if (glob[0]) {
		hist_err_clear();
		last_cmd_set(file, param_and_filter);
	}

	remove = event_trigger_check_remove(glob);

	if (event_trigger_empty_param(param_and_filter))
		return -EINVAL;

	/*
	 * separate the trigger from the filter (k:v [if filter])
	 * allowing for whitespace in the trigger
	 */
	p = param = param_and_filter;
	do {
		/* Look for an "if" token delimited by whitespace on both sides. */
		p = strstr(p, "if");
		if (!p)
			break;
		if (p == param_and_filter)
			return -EINVAL;
		if (*(p - 1) != ' ' && *(p - 1) != '\t') {
			p++;
			continue;
		}
		/* "if" must be followed by at least one filter character. */
		if (p >= param_and_filter + strlen(param_and_filter) - (sizeof("if") - 1) - 1)
			return -EINVAL;
		if (*(p + sizeof("if") - 1) != ' ' && *(p + sizeof("if") - 1) != '\t') {
			p++;
			continue;
		}
		break;
	} while (1);

	if (!p)
		filter = NULL;
	else {
		/* Split the string: trigger spec before "if", filter after. */
		*(p - 1) = '\0';
		filter = strstrip(p);
		param = strstrip(param);
	}

	/*
	 * To simplify arithmetic expression parsing, replace occurrences of
	 * '.sym-offset' modifier with '.symXoffset'
	 */
	start = strstr(param, ".sym-offset");
	while (start) {
		*(start + 4) = 'X';
		start = strstr(start + 11, ".sym-offset");
	}

	attrs = parse_hist_trigger_attrs(file->tr, param);
	if (IS_ERR(attrs))
		return PTR_ERR(attrs);

	if (attrs->map_bits)
		hist_trigger_bits = attrs->map_bits;

	/* hist_data takes ownership of attrs from here on. */
	hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
	if (IS_ERR(hist_data)) {
		destroy_hist_trigger_attrs(attrs);
		return PTR_ERR(hist_data);
	}

	trigger_data = trigger_data_alloc(cmd_ops, cmd, param, hist_data);
	if (!trigger_data) {
		ret = -ENOMEM;
		goto out_free;
	}

	ret = event_trigger_set_filter(cmd_ops, file, filter, trigger_data);
	if (ret < 0)
		goto out_free;

	if (remove) {
		if (!have_hist_trigger_match(trigger_data, file))
			goto out_free;

		/* Can't remove a trigger other hist vars still reference. */
		if (hist_trigger_check_refs(trigger_data, file)) {
			ret = -EBUSY;
			goto out_free;
		}

		event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
		/* Drop the ref taken on any same-named synthetic event. */
		se_name = trace_event_name(file->event_call);
		se = find_synth_event(se_name);
		if (se)
			se->ref--;
		ret = 0;
		goto out_free;
	}

	/* If this just updates an existing trigger, we're done. */
	if (existing_hist_update_only(glob, trigger_data, file))
		goto out_free;

	if (!get_named_trigger_data(trigger_data)) {

		ret = create_actions(hist_data);
		if (ret)
			goto out_free;

		if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
			ret = save_hist_vars(hist_data);
			if (ret)
				goto out_free;
		}

		ret = tracing_map_init(hist_data->map);
		if (ret)
			goto out_free;
	}

	ret = event_trigger_register(cmd_ops, file, glob, trigger_data);
	if (ret < 0)
		goto out_free;

	ret = hist_trigger_enable(trigger_data, file);
	if (ret)
		goto out_unreg;

	/* Pin any same-named synthetic event while this trigger exists. */
	se_name = trace_event_name(file->event_call);
	se = find_synth_event(se_name);
	if (se)
		se->ref++;
 out:
	if (ret == 0 && glob[0])
		hist_err_clear();

	return ret;
 out_unreg:
	event_trigger_unregister(cmd_ops, file, glob+1, trigger_data);
 out_free:
	event_trigger_reset_filter(cmd_ops, trigger_data);

	remove_hist_vars(hist_data);

	kfree(trigger_data);

	destroy_hist_data(hist_data);
	goto out;
}
/* The 'hist' trigger command: histogram aggregation on event fields. */
static struct event_command trigger_hist_cmd = {
	.name			= "hist",
	.trigger_type		= ETT_EVENT_HIST,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.parse			= event_hist_trigger_parse,
	.reg			= hist_register_trigger,
	.unreg			= hist_unregister_trigger,
	.unreg_all		= hist_unreg_all,
	.get_trigger_ops	= event_hist_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
  5689. __init int register_trigger_hist_cmd(void)
  5690. {
  5691. int ret;
  5692. ret = register_event_command(&trigger_hist_cmd);
  5693. WARN_ON(ret < 0);
  5694. return ret;
  5695. }
  5696. static void
  5697. hist_enable_trigger(struct event_trigger_data *data,
  5698. struct trace_buffer *buffer, void *rec,
  5699. struct ring_buffer_event *event)
  5700. {
  5701. struct enable_trigger_data *enable_data = data->private_data;
  5702. struct event_trigger_data *test;
  5703. list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
  5704. lockdep_is_held(&event_mutex)) {
  5705. if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
  5706. if (enable_data->enable)
  5707. test->paused = false;
  5708. else
  5709. test->paused = true;
  5710. }
  5711. }
  5712. }
  5713. static void
  5714. hist_enable_count_trigger(struct event_trigger_data *data,
  5715. struct trace_buffer *buffer, void *rec,
  5716. struct ring_buffer_event *event)
  5717. {
  5718. if (!data->count)
  5719. return;
  5720. if (data->count != -1)
  5721. (data->count)--;
  5722. hist_enable_trigger(data, buffer, rec, event);
  5723. }
/* enable_hist, no count: unpause hist triggers on every firing */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.trigger		= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* enable_hist with a :count limit */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.trigger		= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* disable_hist, no count: pause hist triggers on every firing */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.trigger		= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* disable_hist with a :count limit */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.trigger		= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
  5748. static struct event_trigger_ops *
  5749. hist_enable_get_trigger_ops(char *cmd, char *param)
  5750. {
  5751. struct event_trigger_ops *ops;
  5752. bool enable;
  5753. enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
  5754. if (enable)
  5755. ops = param ? &hist_enable_count_trigger_ops :
  5756. &hist_enable_trigger_ops;
  5757. else
  5758. ops = param ? &hist_disable_count_trigger_ops :
  5759. &hist_disable_trigger_ops;
  5760. return ops;
  5761. }
  5762. static void hist_enable_unreg_all(struct trace_event_file *file)
  5763. {
  5764. struct event_trigger_data *test, *n;
  5765. list_for_each_entry_safe(test, n, &file->triggers, list) {
  5766. if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
  5767. list_del_rcu(&test->list);
  5768. update_cond_flag(file);
  5769. trace_event_trigger_enable_disable(file, 0);
  5770. if (test->ops->free)
  5771. test->ops->free(test);
  5772. }
  5773. }
  5774. }
/* The 'enable_hist' trigger command: unpause hist triggers on another event. */
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* The 'disable_hist' trigger command: pause hist triggers on another event. */
static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.parse			= event_enable_trigger_parse,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/* Undo registration of both hist enable/disable commands (boot-time rollback). */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
/*
 * Register the 'enable_hist' and 'disable_hist' trigger commands at boot.
 * If the second registration fails, the first is rolled back so the two
 * commands are only ever available as a pair.  Returns 0 or a negative
 * errno; failures are also reported via WARN_ON.
 */
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}