xfs_trace.h
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2009, Christoph Hellwig
 * All Rights Reserved.
 *
 * NOTE: none of these tracepoints shall be considered a stable kernel ABI
 * as they can change at any time.
 *
 * Current conventions for printing numbers measuring specific units:
 *
 * agno: allocation group number
 *
 * agino: per-AG inode number
 * ino: filesystem inode number
 *
 * agbno: per-AG block number in fs blocks
 * startblock: physical block number for file mappings.  This is either a
 *             segmented fsblock for data device mappings, or a rfsblock
 *             for realtime device mappings
 * fsbcount: number of blocks in an extent, in fs blocks
 *
 * daddr: physical block number in 512b blocks
 * bbcount: number of blocks in a physical extent, in 512b blocks
 *
 * rtx: physical rt extent number for extent mappings
 * rtxcount: number of rt extents in an extent mapping
 *
 * owner: reverse-mapping owner, usually inodes
 *
 * fileoff: file offset, in fs blocks
 * pos: file offset, in bytes
 * bytecount: number of bytes
 *
 * dablk: directory or xattr block offset, in filesystem blocks
 *
 * disize: ondisk file size, in bytes
 * isize: incore file size, in bytes
 *
 * forkoff: inode fork offset, in bytes
 *
 * ireccount: number of inode records
 *
 * Numbers describing space allocations (blocks, extents, inodes) should be
 * formatted in hexadecimal.
 */
  46. #undef TRACE_SYSTEM
  47. #define TRACE_SYSTEM xfs
  48. #if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
  49. #define _TRACE_XFS_H
  50. #include <linux/tracepoint.h>
  51. struct xfs_agf;
  52. struct xfs_alloc_arg;
  53. struct xfs_attr_list_context;
  54. struct xfs_buf_log_item;
  55. struct xfs_da_args;
  56. struct xfs_da_node_entry;
  57. struct xfs_dquot;
  58. struct xfs_log_item;
  59. struct xlog;
  60. struct xlog_ticket;
  61. struct xlog_recover;
  62. struct xlog_recover_item;
  63. struct xlog_rec_header;
  64. struct xlog_in_core;
  65. struct xfs_buf_log_format;
  66. struct xfs_inode_log_format;
  67. struct xfs_bmbt_irec;
  68. struct xfs_btree_cur;
  69. struct xfs_defer_op_type;
  70. struct xfs_refcount_irec;
  71. struct xfs_fsmap;
  72. struct xfs_rmap_irec;
  73. struct xfs_icreate_log;
  74. struct xfs_owner_info;
  75. struct xfs_trans_res;
  76. struct xfs_inobt_rec_incore;
  77. union xfs_btree_ptr;
  78. struct xfs_dqtrx;
  79. struct xfs_icwalk;
  80. struct xfs_perag;
  81. struct xfbtree;
  82. struct xfs_btree_ops;
  83. struct xfs_bmap_intent;
  84. struct xfs_exchmaps_intent;
  85. struct xfs_exchmaps_req;
  86. struct xfs_exchrange;
  87. struct xfs_getparents;
  88. struct xfs_parent_irec;
  89. struct xfs_attrlist_cursor_kern;
  90. struct xfs_extent_free_item;
  91. struct xfs_rmap_intent;
  92. struct xfs_refcount_intent;
/*
 * Flag-name table for decoding ctx->attr_filter bits with __print_flags()
 * in the attr-list tracepoints below.
 */
  93. #define XFS_ATTR_FILTER_FLAGS \
  94. { XFS_ATTR_ROOT, "ROOT" }, \
  95. { XFS_ATTR_SECURE, "SECURE" }, \
  96. { XFS_ATTR_INCOMPLETE, "INCOMPLETE" }, \
  97. { XFS_ATTR_PARENT, "PARENT" }
  98. DECLARE_EVENT_CLASS(xfs_attr_list_class,
  99. TP_PROTO(struct xfs_attr_list_context *ctx),
  100. TP_ARGS(ctx),
  101. TP_STRUCT__entry(
  102. __field(dev_t, dev)
  103. __field(xfs_ino_t, ino)
  104. __field(u32, hashval)
  105. __field(u32, blkno)
  106. __field(u32, offset)
  107. __field(void *, buffer)
  108. __field(int, bufsize)
  109. __field(int, count)
  110. __field(int, firstu)
  111. __field(int, dupcnt)
  112. __field(unsigned int, attr_filter)
  113. ),
  114. TP_fast_assign(
  115. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  116. __entry->ino = ctx->dp->i_ino;
  117. __entry->hashval = ctx->cursor.hashval;
  118. __entry->blkno = ctx->cursor.blkno;
  119. __entry->offset = ctx->cursor.offset;
  120. __entry->buffer = ctx->buffer;
  121. __entry->bufsize = ctx->bufsize;
  122. __entry->count = ctx->count;
  123. __entry->firstu = ctx->firstu;
  124. __entry->attr_filter = ctx->attr_filter;
  125. ),
  126. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  127. "buffer %p size %u count %u firstu %u filter %s",
  128. MAJOR(__entry->dev), MINOR(__entry->dev),
  129. __entry->ino,
  130. __entry->hashval,
  131. __entry->blkno,
  132. __entry->offset,
  133. __entry->dupcnt,
  134. __entry->buffer,
  135. __entry->bufsize,
  136. __entry->count,
  137. __entry->firstu,
  138. __print_flags(__entry->attr_filter, "|",
  139. XFS_ATTR_FILTER_FLAGS)
  140. )
  141. )
/* Stamp out one tracepoint per call site from xfs_attr_list_class. */
  142. #define DEFINE_ATTR_LIST_EVENT(name) \
  143. DEFINE_EVENT(xfs_attr_list_class, name, \
  144. TP_PROTO(struct xfs_attr_list_context *ctx), \
  145. TP_ARGS(ctx))
/* Events share the class's fields and format; only the name differs. */
  146. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
  147. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
  148. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
  149. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
  150. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
  151. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
  152. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
  153. DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
  154. DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list);
  155. DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list);
/*
 * Fired when replay of a logged intent item fails during log recovery;
 * records the deferred-op type name (ops->name) and the error code.
 */
  156. TRACE_EVENT(xlog_intent_recovery_failed,
  157. TP_PROTO(struct xfs_mount *mp, const struct xfs_defer_op_type *ops,
  158. int error),
  159. TP_ARGS(mp, ops, error),
  160. TP_STRUCT__entry(
  161. __field(dev_t, dev)
  162. __string(name, ops->name)
  163. __field(int, error)
  164. ),
  165. TP_fast_assign(
  166. __entry->dev = mp->m_super->s_dev;
  167. __assign_str(name);
  168. __entry->error = error;
  169. ),
  170. TP_printk("dev %d:%d optype %s error %d",
  171. MAJOR(__entry->dev), MINOR(__entry->dev),
  172. __get_str(name),
  173. __entry->error)
  174. );
/*
 * Event class for per-AG reference counting: snapshots both the passive
 * (pag_ref) and active (pag_active_ref) refcounts along with the caller
 * address, so ref leaks can be attributed to a call site.
 */
  175. DECLARE_EVENT_CLASS(xfs_perag_class,
  176. TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip),
  177. TP_ARGS(pag, caller_ip),
  178. TP_STRUCT__entry(
  179. __field(dev_t, dev)
  180. __field(xfs_agnumber_t, agno)
  181. __field(int, refcount)
  182. __field(int, active_refcount)
  183. __field(unsigned long, caller_ip)
  184. ),
  185. TP_fast_assign(
  186. __entry->dev = pag->pag_mount->m_super->s_dev;
  187. __entry->agno = pag->pag_agno;
  188. __entry->refcount = atomic_read(&pag->pag_ref);
  189. __entry->active_refcount = atomic_read(&pag->pag_active_ref);
  190. __entry->caller_ip = caller_ip;
  191. ),
  192. TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS",
  193. MAJOR(__entry->dev), MINOR(__entry->dev),
  194. __entry->agno,
  195. __entry->refcount,
  196. __entry->active_refcount,
  197. (char *)__entry->caller_ip)
  198. );
/* Stamp out one tracepoint per perag get/hold/put/grab/rele call site. */
  199. #define DEFINE_PERAG_REF_EVENT(name) \
  200. DEFINE_EVENT(xfs_perag_class, name, \
  201. TP_PROTO(struct xfs_perag *pag, unsigned long caller_ip), \
  202. TP_ARGS(pag, caller_ip))
  203. DEFINE_PERAG_REF_EVENT(xfs_perag_get);
  204. DEFINE_PERAG_REF_EVENT(xfs_perag_hold);
  205. DEFINE_PERAG_REF_EVENT(xfs_perag_put);
  206. DEFINE_PERAG_REF_EVENT(xfs_perag_grab);
  207. DEFINE_PERAG_REF_EVENT(xfs_perag_grab_next_tag);
  208. DEFINE_PERAG_REF_EVENT(xfs_perag_rele);
  209. DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
  210. DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
  211. DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count);
/*
 * Fired from the inode garbage-collection worker; records how many times
 * the shrinker hit the queue before this run.
 */
  212. TRACE_EVENT(xfs_inodegc_worker,
  213. TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
  214. TP_ARGS(mp, shrinker_hits),
  215. TP_STRUCT__entry(
  216. __field(dev_t, dev)
  217. __field(unsigned int, shrinker_hits)
  218. ),
  219. TP_fast_assign(
  220. __entry->dev = mp->m_super->s_dev;
  221. __entry->shrinker_hits = shrinker_hits;
  222. ),
  223. TP_printk("dev %d:%d shrinker_hits %u",
  224. MAJOR(__entry->dev), MINOR(__entry->dev),
  225. __entry->shrinker_hits)
  226. );
/*
 * Event class for whole-filesystem state transitions: snapshots the mount
 * feature mask, operational state flags, and VFS superblock flags plus the
 * caller address.
 *
 * NOTE(review): when mp == NULL only caller_ip is assigned; dev/mflags/
 * opstate/sbflags are left unpopulated in the trace entry.  This mirrors
 * the guard in TP_fast_assign below — confirm callers passing NULL accept
 * garbage in those fields.
 */
  227. DECLARE_EVENT_CLASS(xfs_fs_class,
  228. TP_PROTO(struct xfs_mount *mp, void *caller_ip),
  229. TP_ARGS(mp, caller_ip),
  230. TP_STRUCT__entry(
  231. __field(dev_t, dev)
  232. __field(unsigned long long, mflags)
  233. __field(unsigned long, opstate)
  234. __field(unsigned long, sbflags)
  235. __field(void *, caller_ip)
  236. ),
  237. TP_fast_assign(
  238. if (mp) {
  239. __entry->dev = mp->m_super->s_dev;
  240. __entry->mflags = mp->m_features;
  241. __entry->opstate = mp->m_opstate;
  242. __entry->sbflags = mp->m_super->s_flags;
  243. }
  244. __entry->caller_ip = caller_ip;
  245. ),
  246. TP_printk("dev %d:%d m_features 0x%llx opstate (%s) s_flags 0x%lx caller %pS",
  247. MAJOR(__entry->dev), MINOR(__entry->dev),
  248. __entry->mflags,
  249. __print_flags(__entry->opstate, "|", XFS_OPSTATE_STRINGS),
  250. __entry->sbflags,
  251. __entry->caller_ip)
  252. );
/* Stamp out one fs-wide tracepoint per inodegc/blockgc/sync call site. */
  253. #define DEFINE_FS_EVENT(name) \
  254. DEFINE_EVENT(xfs_fs_class, name, \
  255. TP_PROTO(struct xfs_mount *mp, void *caller_ip), \
  256. TP_ARGS(mp, caller_ip))
  257. DEFINE_FS_EVENT(xfs_inodegc_flush);
  258. DEFINE_FS_EVENT(xfs_inodegc_push);
  259. DEFINE_FS_EVENT(xfs_inodegc_start);
  260. DEFINE_FS_EVENT(xfs_inodegc_stop);
  261. DEFINE_FS_EVENT(xfs_inodegc_queue);
  262. DEFINE_FS_EVENT(xfs_inodegc_throttle);
  263. DEFINE_FS_EVENT(xfs_fs_sync_fs);
  264. DEFINE_FS_EVENT(xfs_blockgc_start);
  265. DEFINE_FS_EVENT(xfs_blockgc_stop);
  266. DEFINE_FS_EVENT(xfs_blockgc_worker);
  267. DEFINE_FS_EVENT(xfs_blockgc_flush_all);
/*
 * Fired when the inodegc shrinker scans this mount; records the
 * shrink_control's nr_to_scan and the caller address.
 */
  268. TRACE_EVENT(xfs_inodegc_shrinker_scan,
  269. TP_PROTO(struct xfs_mount *mp, struct shrink_control *sc,
  270. void *caller_ip),
  271. TP_ARGS(mp, sc, caller_ip),
  272. TP_STRUCT__entry(
  273. __field(dev_t, dev)
  274. __field(unsigned long, nr_to_scan)
  275. __field(void *, caller_ip)
  276. ),
  277. TP_fast_assign(
  278. __entry->dev = mp->m_super->s_dev;
  279. __entry->nr_to_scan = sc->nr_to_scan;
  280. __entry->caller_ip = caller_ip;
  281. ),
  282. TP_printk("dev %d:%d nr_to_scan %lu caller %pS",
  283. MAJOR(__entry->dev), MINOR(__entry->dev),
  284. __entry->nr_to_scan,
  285. __entry->caller_ip)
  286. );
/*
 * Minimal per-AG event class: just the device and the allocation group
 * number, for tracing AG header reads.
 */
  287. DECLARE_EVENT_CLASS(xfs_ag_class,
  288. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
  289. TP_ARGS(mp, agno),
  290. TP_STRUCT__entry(
  291. __field(dev_t, dev)
  292. __field(xfs_agnumber_t, agno)
  293. ),
  294. TP_fast_assign(
  295. __entry->dev = mp->m_super->s_dev;
  296. __entry->agno = agno;
  297. ),
  298. TP_printk("dev %d:%d agno 0x%x",
  299. MAJOR(__entry->dev), MINOR(__entry->dev),
  300. __entry->agno)
  301. );
/* Stamp out one tracepoint per AGF/AGI read path. */
  302. #define DEFINE_AG_EVENT(name) \
  303. DEFINE_EVENT(xfs_ag_class, name, \
  304. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno), \
  305. TP_ARGS(mp, agno))
  306. DEFINE_AG_EVENT(xfs_read_agf);
  307. DEFINE_AG_EVENT(xfs_alloc_read_agf);
  308. DEFINE_AG_EVENT(xfs_read_agi);
  309. DEFINE_AG_EVENT(xfs_ialloc_read_agi);
  310. TRACE_EVENT(xfs_attr_list_node_descend,
  311. TP_PROTO(struct xfs_attr_list_context *ctx,
  312. struct xfs_da_node_entry *btree),
  313. TP_ARGS(ctx, btree),
  314. TP_STRUCT__entry(
  315. __field(dev_t, dev)
  316. __field(xfs_ino_t, ino)
  317. __field(u32, hashval)
  318. __field(u32, blkno)
  319. __field(u32, offset)
  320. __field(void *, buffer)
  321. __field(int, bufsize)
  322. __field(int, count)
  323. __field(int, firstu)
  324. __field(int, dupcnt)
  325. __field(unsigned int, attr_filter)
  326. __field(u32, bt_hashval)
  327. __field(u32, bt_before)
  328. ),
  329. TP_fast_assign(
  330. __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
  331. __entry->ino = ctx->dp->i_ino;
  332. __entry->hashval = ctx->cursor.hashval;
  333. __entry->blkno = ctx->cursor.blkno;
  334. __entry->offset = ctx->cursor.offset;
  335. __entry->buffer = ctx->buffer;
  336. __entry->bufsize = ctx->bufsize;
  337. __entry->count = ctx->count;
  338. __entry->firstu = ctx->firstu;
  339. __entry->attr_filter = ctx->attr_filter;
  340. __entry->bt_hashval = be32_to_cpu(btree->hashval);
  341. __entry->bt_before = be32_to_cpu(btree->before);
  342. ),
  343. TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
  344. "buffer %p size %u count %u firstu %u filter %s "
  345. "node hashval %u, node before %u",
  346. MAJOR(__entry->dev), MINOR(__entry->dev),
  347. __entry->ino,
  348. __entry->hashval,
  349. __entry->blkno,
  350. __entry->offset,
  351. __entry->dupcnt,
  352. __entry->buffer,
  353. __entry->bufsize,
  354. __entry->count,
  355. __entry->firstu,
  356. __print_flags(__entry->attr_filter, "|",
  357. XFS_ATTR_FILTER_FLAGS),
  358. __entry->bt_hashval,
  359. __entry->bt_before)
  360. );
/*
 * Events that record a single incore extent record (the one under the
 * iext cursor) for a given inode fork.  @state selects which fork via
 * xfs_iext_state_to_fork(); @caller_ip records the tracing call site.
 */
DECLARE_EVENT_CLASS(xfs_bmap_class,
	TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
		 unsigned long caller_ip),
	TP_ARGS(ip, cur, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(void *, leaf)
		__field(int, pos)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_exntst_t, state)
		__field(int, bmap_state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		struct xfs_ifork	*ifp;
		struct xfs_bmbt_irec	r;

		/* decode the extent record at the cursor position */
		ifp = xfs_iext_state_to_fork(ip, state);
		xfs_iext_get_extent(ifp, cur, &r);
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->leaf = cur->leaf;
		__entry->pos = cur->pos;
		__entry->startoff = r.br_startoff;
		__entry->startblock = r.br_startblock;
		__entry->blockcount = r.br_blockcount;
		__entry->state = r.br_state;
		__entry->bmap_state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d "
		  "fileoff 0x%llx startblock 0x%llx fsbcount 0x%llx flag %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
		  __entry->leaf,
		  __entry->pos,
		  __entry->startoff,
		  (int64_t)__entry->startblock,
		  __entry->blockcount,
		  __entry->state,
		  (char *)__entry->caller_ip)
)

#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
	TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, cur, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_insert);
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_read_extent);
DEFINE_BMAP_EVENT(xfs_write_extent);
/*
 * Events that snapshot a buffer's state (disk address, length, hold and
 * pin counts, semaphore count, flags, verifier ops) plus the call site.
 */
DECLARE_EVENT_CLASS(xfs_buf_class,
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
	TP_ARGS(bp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(int, nblks)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
		__field(const void *, buf_ops)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->nblks = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
		__entry->buf_ops = bp->b_ops;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s bufops %pS caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->nblks,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  __entry->buf_ops,
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_EVENT(name) \
DEFINE_EVENT(xfs_buf_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
	TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_delwri_pushbuf);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_buf_drain_buftarg);
DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);

/* not really buffer traces, but the buf provides useful information */
DEFINE_BUF_EVENT(xfs_btree_corrupt);
DEFINE_BUF_EVENT(xfs_reset_dqcounts);
/* pass flags explicitly */
/*
 * Like xfs_buf_class, but records the caller-supplied @flags (e.g. the
 * lookup/read flags) rather than bp->b_flags.
 */
DECLARE_EVENT_CLASS(xfs_buf_flags_class,
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
	TP_ARGS(bp, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(unsigned int, length)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(unsigned, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->length = bp->b_length;
		__entry->flags = flags;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_BUF_FLAGS_EVENT(name) \
DEFINE_EVENT(xfs_buf_flags_class, name, \
	TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
	TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
/*
 * Trace a buffer I/O error: the buffer state plus the error code and the
 * failure address that reported it.
 */
TRACE_EVENT(xfs_buf_ioerror,
	TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
	TP_ARGS(bp, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, bno)
		__field(unsigned int, length)
		__field(unsigned, flags)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned, lockval)
		__field(int, error)
		__field(xfs_failaddr_t, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = bp->b_target->bt_dev;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->length = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		__entry->lockval = bp->b_sema.count;
		__entry->error = error;
		__entry->flags = bp->b_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d error %d flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->bno,
		  __entry->length,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __entry->error,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
		  (void *)__entry->caller_ip)
);
/*
 * Events that snapshot a buffer log item: the underlying buffer state
 * plus the bli recursion count, refcount, and bli/log-item flags.
 */
DECLARE_EVENT_CLASS(xfs_buf_item_class,
	TP_PROTO(struct xfs_buf_log_item *bip),
	TP_ARGS(bip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, buf_bno)
		__field(unsigned int, buf_len)
		__field(int, buf_hold)
		__field(int, buf_pincount)
		__field(int, buf_lockval)
		__field(unsigned, buf_flags)
		__field(unsigned, bli_recur)
		__field(int, bli_refcount)
		__field(unsigned, bli_flags)
		__field(unsigned long, li_flags)
	),
	TP_fast_assign(
		__entry->dev = bip->bli_buf->b_target->bt_dev;
		__entry->bli_flags = bip->bli_flags;
		__entry->bli_recur = bip->bli_recur;
		__entry->bli_refcount = atomic_read(&bip->bli_refcount);
		__entry->buf_bno = xfs_buf_daddr(bip->bli_buf);
		__entry->buf_len = bip->bli_buf->b_length;
		__entry->buf_flags = bip->bli_buf->b_flags;
		__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
		__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
		__entry->buf_lockval = bip->bli_buf->b_sema.count;
		__entry->li_flags = bip->bli_item.li_flags;
	),
	TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d "
		  "lock %d flags %s recur %d refcount %d bliflags %s "
		  "liflags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long long)__entry->buf_bno,
		  __entry->buf_len,
		  __entry->buf_hold,
		  __entry->buf_pincount,
		  __entry->buf_lockval,
		  __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
		  __entry->bli_recur,
		  __entry->bli_refcount,
		  __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
		  __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)

#define DEFINE_BUF_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_buf_item_class, name, \
	TP_PROTO(struct xfs_buf_log_item *bip), \
	TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_release);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bdetach);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
/*
 * Filestream allocator events: record the per-AG filestream reference
 * count for a given inode/AG pair.
 */
DECLARE_EVENT_CLASS(xfs_filestream_class,
	TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino),
	TP_ARGS(pag, ino),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_agnumber_t, agno)
		__field(int, streams)
	),
	TP_fast_assign(
		__entry->dev = pag->pag_mount->m_super->s_dev;
		__entry->ino = ino;
		__entry->agno = pag->pag_agno;
		__entry->streams = atomic_read(&pag->pagf_fstrms);
	),
	TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->agno,
		  __entry->streams)
)

#define DEFINE_FILESTREAM_EVENT(name) \
DEFINE_EVENT(xfs_filestream_class, name, \
	TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino), \
	TP_ARGS(pag, ino))
DEFINE_FILESTREAM_EVENT(xfs_filestream_free);
DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup);
DEFINE_FILESTREAM_EVENT(xfs_filestream_scan);
/*
 * Trace selection of an AG for a filestream; like xfs_filestream_class
 * but also records the AG's free block count.
 */
TRACE_EVENT(xfs_filestream_pick,
	TP_PROTO(struct xfs_perag *pag, xfs_ino_t ino),
	TP_ARGS(pag, ino),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_agnumber_t, agno)
		__field(int, streams)
		__field(xfs_extlen_t, free)
	),
	TP_fast_assign(
		__entry->dev = pag->pag_mount->m_super->s_dev;
		__entry->ino = ino;
		__entry->agno = pag->pag_agno;
		__entry->streams = atomic_read(&pag->pagf_fstrms);
		__entry->free = pag->pagf_freeblks;
	),
	TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->agno,
		  __entry->streams,
		  __entry->free)
);
/*
 * Inode locking events: record the inode, the lock flags requested, and
 * the call site.
 */
DECLARE_EVENT_CLASS(xfs_lock_class,
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
		 unsigned long caller_ip),
	TP_ARGS(ip, lock_flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, lock_flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lock_flags = lock_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
		  (void *)__entry->caller_ip)
)

#define DEFINE_LOCK_EVENT(name) \
DEFINE_EVENT(xfs_lock_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
/*
 * Generic inode events: just the inode number and its i_flags at the
 * time of the trace.
 */
DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned long, iflags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->iflags = ip->i_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx iflags 0x%lx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->iflags)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_INODE_EVENT(xfs_iget_skip);
DEFINE_INODE_EVENT(xfs_iget_recycle);
DEFINE_INODE_EVENT(xfs_iget_recycle_fail);
DEFINE_INODE_EVENT(xfs_iget_hit);
DEFINE_INODE_EVENT(xfs_iget_miss);
DEFINE_INODE_EVENT(xfs_getattr);
DEFINE_INODE_EVENT(xfs_setattr);
DEFINE_INODE_EVENT(xfs_readlink);
DEFINE_INODE_EVENT(xfs_inactive_symlink);
DEFINE_INODE_EVENT(xfs_alloc_file_space);
DEFINE_INODE_EVENT(xfs_free_file_space);
DEFINE_INODE_EVENT(xfs_zero_file_space);
DEFINE_INODE_EVENT(xfs_collapse_file_space);
DEFINE_INODE_EVENT(xfs_insert_file_space);
DEFINE_INODE_EVENT(xfs_readdir);
#ifdef CONFIG_XFS_POSIX_ACL
DEFINE_INODE_EVENT(xfs_get_acl);
#endif
DEFINE_INODE_EVENT(xfs_vm_bmap);
DEFINE_INODE_EVENT(xfs_file_ioctl);
DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
DEFINE_INODE_EVENT(xfs_update_time);
DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
DEFINE_INODE_EVENT(xfs_inode_set_reclaimable);
DEFINE_INODE_EVENT(xfs_inode_reclaiming);
DEFINE_INODE_EVENT(xfs_inode_set_need_inactive);
DEFINE_INODE_EVENT(xfs_inode_inactivating);
/*
 * ftrace's __print_symbolic requires that all enum values be wrapped in the
 * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
 * ring buffer. Somehow this was only worth mentioning in the ftrace sample
 * code.
 */
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED);
TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW);
/*
 * Trace a page fault on an XFS file mapping, recording the fault order
 * and whether it was a write fault.
 */
TRACE_EVENT(xfs_filemap_fault,
	TP_PROTO(struct xfs_inode *ip, unsigned int order, bool write_fault),
	TP_ARGS(ip, order, write_fault),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned int, order)
		__field(bool, write_fault)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->order = order;
		__entry->write_fault = write_fault;
	),
	TP_printk("dev %d:%d ino 0x%llx order %u write_fault %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->order,
		  __entry->write_fault)
)
/*
 * Inode reference events: VFS i_count and XFS pin count for an inode,
 * plus the call site.  Instantiated by DEFINE_IREF_EVENT further down.
 */
DECLARE_EVENT_CLASS(xfs_iref_class,
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
	TP_ARGS(ip, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, count)
		__field(int, pincount)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->count = atomic_read(&VFS_I(ip)->i_count);
		__entry->pincount = atomic_read(&ip->i_pincount);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->count,
		  __entry->pincount,
		  (char *)__entry->caller_ip)
)
/*
 * Trace the speculative preallocation size chosen for a delalloc write:
 * the prealloc block count, the shift used to derive it, and the
 * mount's allocsize blocks.
 */
TRACE_EVENT(xfs_iomap_prealloc_size,
	TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift,
		 unsigned int writeio_blocks),
	TP_ARGS(ip, blocks, shift, writeio_blocks),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsblock_t, blocks)
		__field(int, shift)
		__field(unsigned int, writeio_blocks)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->blocks = blocks;
		__entry->shift = shift;
		__entry->writeio_blocks = writeio_blocks;
	),
	TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
		  "m_allocsize_blocks %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
		  __entry->blocks, __entry->shift, __entry->writeio_blocks)
)
  855. TRACE_EVENT(xfs_irec_merge_pre,
  856. TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
  857. uint16_t holemask, xfs_agino_t nagino, uint16_t nholemask),
  858. TP_ARGS(mp, agno, agino, holemask, nagino, nholemask),
  859. TP_STRUCT__entry(
  860. __field(dev_t, dev)
  861. __field(xfs_agnumber_t, agno)
  862. __field(xfs_agino_t, agino)
  863. __field(uint16_t, holemask)
  864. __field(xfs_agino_t, nagino)
  865. __field(uint16_t, nholemask)
  866. ),
  867. TP_fast_assign(
  868. __entry->dev = mp->m_super->s_dev;
  869. __entry->agno = agno;
  870. __entry->agino = agino;
  871. __entry->holemask = holemask;
  872. __entry->nagino = nagino;
  873. __entry->nholemask = holemask;
  874. ),
  875. TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x new_agino 0x%x new_holemask 0x%x",
  876. MAJOR(__entry->dev), MINOR(__entry->dev),
  877. __entry->agno,
  878. __entry->agino,
  879. __entry->holemask,
  880. __entry->nagino,
  881. __entry->nholemask)
  882. )
/*
 * Trace the resulting inode btree record after a merge: the final
 * (agino, holemask) pair.
 */
TRACE_EVENT(xfs_irec_merge_post,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
		 uint16_t holemask),
	TP_ARGS(mp, agno, agino, holemask),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(uint16_t, holemask)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agino = agino;
		__entry->holemask = holemask;
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x",
		  MAJOR(__entry->dev),
		  MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  __entry->holemask)
)
/* Instantiations of xfs_iref_class declared above. */
#define DEFINE_IREF_EVENT(name) \
DEFINE_EVENT(xfs_iref_class, name, \
	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
	TP_ARGS(ip, caller_ip))
DEFINE_IREF_EVENT(xfs_irele);
DEFINE_IREF_EVENT(xfs_inode_pin);
DEFINE_IREF_EVENT(xfs_inode_unpin);
DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
/*
 * Directory namespace events: a parent directory inode and the (not
 * NUL-terminated) entry name, stored as a dynamic array.
 */
DECLARE_EVENT_CLASS(xfs_namespace_class,
	TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name),
	TP_ARGS(dp, name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, dp_ino)
		__field(int, namelen)
		__dynamic_array(char, name, name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(dp)->i_sb->s_dev;
		__entry->dp_ino = dp->i_ino;
		__entry->namelen = name->len;
		memcpy(__get_str(name), name->name, name->len);
	),
	TP_printk("dev %d:%d dp ino 0x%llx name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dp_ino,
		  __entry->namelen,
		  __get_str(name))
)

#define DEFINE_NAMESPACE_EVENT(name) \
DEFINE_EVENT(xfs_namespace_class, name, \
	TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name), \
	TP_ARGS(dp, name))
DEFINE_NAMESPACE_EVENT(xfs_remove);
DEFINE_NAMESPACE_EVENT(xfs_link);
DEFINE_NAMESPACE_EVENT(xfs_lookup);
DEFINE_NAMESPACE_EVENT(xfs_create);
DEFINE_NAMESPACE_EVENT(xfs_symlink);
/*
 * Trace a rename: source and target parent directories plus both entry
 * names (stored as dynamic arrays, not NUL-terminated).
 */
TRACE_EVENT(xfs_rename,
	TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
		 struct xfs_name *src_name, struct xfs_name *target_name),
	TP_ARGS(src_dp, target_dp, src_name, target_name),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_dp_ino)
		__field(xfs_ino_t, target_dp_ino)
		__field(int, src_namelen)
		__field(int, target_namelen)
		__dynamic_array(char, src_name, src_name->len)
		__dynamic_array(char, target_name, target_name->len)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src_dp)->i_sb->s_dev;
		__entry->src_dp_ino = src_dp->i_ino;
		__entry->target_dp_ino = target_dp->i_ino;
		__entry->src_namelen = src_name->len;
		__entry->target_namelen = target_name->len;
		memcpy(__get_str(src_name), src_name->name, src_name->len);
		memcpy(__get_str(target_name), target_name->name,
			target_name->len);
	),
	TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
		  " src name %.*s target name %.*s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->src_dp_ino,
		  __entry->target_dp_ino,
		  __entry->src_namelen,
		  __get_str(src_name),
		  __entry->target_namelen,
		  __get_str(target_name))
)
  977. DECLARE_EVENT_CLASS(xfs_dquot_class,
  978. TP_PROTO(struct xfs_dquot *dqp),
  979. TP_ARGS(dqp),
  980. TP_STRUCT__entry(
  981. __field(dev_t, dev)
  982. __field(u32, id)
  983. __field(xfs_dqtype_t, type)
  984. __field(unsigned, flags)
  985. __field(unsigned, nrefs)
  986. __field(unsigned long long, res_bcount)
  987. __field(unsigned long long, res_rtbcount)
  988. __field(unsigned long long, res_icount)
  989. __field(unsigned long long, bcount)
  990. __field(unsigned long long, rtbcount)
  991. __field(unsigned long long, icount)
  992. __field(unsigned long long, blk_hardlimit)
  993. __field(unsigned long long, blk_softlimit)
  994. __field(unsigned long long, rtb_hardlimit)
  995. __field(unsigned long long, rtb_softlimit)
  996. __field(unsigned long long, ino_hardlimit)
  997. __field(unsigned long long, ino_softlimit)
  998. ),
  999. TP_fast_assign(
  1000. __entry->dev = dqp->q_mount->m_super->s_dev;
  1001. __entry->id = dqp->q_id;
  1002. __entry->type = dqp->q_type;
  1003. __entry->flags = dqp->q_flags;
  1004. __entry->nrefs = dqp->q_nrefs;
  1005. __entry->res_bcount = dqp->q_blk.reserved;
  1006. __entry->res_rtbcount = dqp->q_rtb.reserved;
  1007. __entry->res_icount = dqp->q_ino.reserved;
  1008. __entry->bcount = dqp->q_blk.count;
  1009. __entry->rtbcount = dqp->q_rtb.count;
  1010. __entry->icount = dqp->q_ino.count;
  1011. __entry->blk_hardlimit = dqp->q_blk.hardlimit;
  1012. __entry->blk_softlimit = dqp->q_blk.softlimit;
  1013. __entry->rtb_hardlimit = dqp->q_rtb.hardlimit;
  1014. __entry->rtb_softlimit = dqp->q_rtb.softlimit;
  1015. __entry->ino_hardlimit = dqp->q_ino.hardlimit;
  1016. __entry->ino_softlimit = dqp->q_ino.softlimit;
  1017. ),
  1018. TP_printk("dev %d:%d id 0x%x type %s flags %s nrefs %u "
  1019. "res_bc 0x%llx res_rtbc 0x%llx res_ic 0x%llx "
  1020. "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx "
  1021. "rtbcnt 0x%llx rtbhardlimit 0x%llx rtbsoftlimit 0x%llx "
  1022. "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]",
  1023. MAJOR(__entry->dev), MINOR(__entry->dev),
  1024. __entry->id,
  1025. __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
  1026. __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
  1027. __entry->nrefs,
  1028. __entry->res_bcount,
  1029. __entry->res_rtbcount,
  1030. __entry->res_icount,
  1031. __entry->bcount,
  1032. __entry->blk_hardlimit,
  1033. __entry->blk_softlimit,
  1034. __entry->rtbcount,
  1035. __entry->rtb_hardlimit,
  1036. __entry->rtb_softlimit,
  1037. __entry->icount,
  1038. __entry->ino_hardlimit,
  1039. __entry->ino_softlimit)
  1040. )
  1041. #define DEFINE_DQUOT_EVENT(name) \
  1042. DEFINE_EVENT(xfs_dquot_class, name, \
  1043. TP_PROTO(struct xfs_dquot *dqp), \
  1044. TP_ARGS(dqp))
  1045. DEFINE_DQUOT_EVENT(xfs_dqadjust);
  1046. DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
  1047. DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
  1048. DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy);
  1049. DEFINE_DQUOT_EVENT(xfs_dqreclaim_done);
  1050. DEFINE_DQUOT_EVENT(xfs_dqattach_found);
  1051. DEFINE_DQUOT_EVENT(xfs_dqattach_get);
  1052. DEFINE_DQUOT_EVENT(xfs_dqalloc);
  1053. DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
  1054. DEFINE_DQUOT_EVENT(xfs_dqread);
  1055. DEFINE_DQUOT_EVENT(xfs_dqread_fail);
  1056. DEFINE_DQUOT_EVENT(xfs_dqget_hit);
  1057. DEFINE_DQUOT_EVENT(xfs_dqget_miss);
  1058. DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
  1059. DEFINE_DQUOT_EVENT(xfs_dqget_dup);
  1060. DEFINE_DQUOT_EVENT(xfs_dqput);
  1061. DEFINE_DQUOT_EVENT(xfs_dqput_free);
  1062. DEFINE_DQUOT_EVENT(xfs_dqrele);
  1063. DEFINE_DQUOT_EVENT(xfs_dqflush);
  1064. DEFINE_DQUOT_EVENT(xfs_dqflush_force);
  1065. DEFINE_DQUOT_EVENT(xfs_dqflush_done);
  1066. DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_before);
  1067. DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_after);
/*
 * Trace a transactional dquot modification: which quota field is being
 * changed (a XFS_QMOPT_* value) and by what signed delta.
 */
TRACE_EVENT(xfs_trans_mod_dquot,
	TP_PROTO(struct xfs_trans *tp, struct xfs_dquot *dqp,
		 unsigned int field, int64_t delta),
	TP_ARGS(tp, dqp, field, delta),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_dqtype_t, type)
		__field(unsigned int, flags)
		__field(unsigned int, dqid)
		__field(unsigned int, field)
		__field(int64_t, delta)
	),
	TP_fast_assign(
		__entry->dev = tp->t_mountp->m_super->s_dev;
		__entry->type = dqp->q_type;
		__entry->flags = dqp->q_flags;
		__entry->dqid = dqp->q_id;
		__entry->field = field;
		__entry->delta = delta;
	),
	TP_printk("dev %d:%d dquot id 0x%x type %s flags %s field %s delta %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dqid,
		  __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
		  __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
		  __print_flags(__entry->field, "|", XFS_QMOPT_FLAGS),
		  __entry->delta)
);
/*
 * Dquot transaction-delta events: snapshot all reservation and delta
 * counters of a struct xfs_dqtrx along with the owning dquot's identity.
 */
DECLARE_EVENT_CLASS(xfs_dqtrx_class,
	TP_PROTO(struct xfs_dqtrx *qtrx),
	TP_ARGS(qtrx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_dqtype_t, type)
		__field(unsigned int, flags)
		__field(u32, dqid)
		__field(uint64_t, blk_res)
		__field(int64_t, bcount_delta)
		__field(int64_t, delbcnt_delta)
		__field(uint64_t, rtblk_res)
		__field(uint64_t, rtblk_res_used)
		__field(int64_t, rtbcount_delta)
		__field(int64_t, delrtb_delta)
		__field(uint64_t, ino_res)
		__field(uint64_t, ino_res_used)
		__field(int64_t, icount_delta)
	),
	TP_fast_assign(
		__entry->dev = qtrx->qt_dquot->q_mount->m_super->s_dev;
		__entry->type = qtrx->qt_dquot->q_type;
		__entry->flags = qtrx->qt_dquot->q_flags;
		__entry->dqid = qtrx->qt_dquot->q_id;
		__entry->blk_res = qtrx->qt_blk_res;
		__entry->bcount_delta = qtrx->qt_bcount_delta;
		__entry->delbcnt_delta = qtrx->qt_delbcnt_delta;
		__entry->rtblk_res = qtrx->qt_rtblk_res;
		__entry->rtblk_res_used = qtrx->qt_rtblk_res_used;
		__entry->rtbcount_delta = qtrx->qt_rtbcount_delta;
		__entry->delrtb_delta = qtrx->qt_delrtb_delta;
		__entry->ino_res = qtrx->qt_ino_res;
		__entry->ino_res_used = qtrx->qt_ino_res_used;
		__entry->icount_delta = qtrx->qt_icount_delta;
	),
	TP_printk("dev %d:%d dquot id 0x%x type %s flags %s "
		  "blk_res %llu bcount_delta %lld delbcnt_delta %lld "
		  "rtblk_res %llu rtblk_res_used %llu rtbcount_delta %lld delrtb_delta %lld "
		  "ino_res %llu ino_res_used %llu icount_delta %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->dqid,
		  __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS),
		  __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS),
		  __entry->blk_res,
		  __entry->bcount_delta,
		  __entry->delbcnt_delta,
		  __entry->rtblk_res,
		  __entry->rtblk_res_used,
		  __entry->rtbcount_delta,
		  __entry->delrtb_delta,
		  __entry->ino_res,
		  __entry->ino_res_used,
		  __entry->icount_delta)
)

#define DEFINE_DQTRX_EVENT(name) \
DEFINE_EVENT(xfs_dqtrx_class, name, \
	TP_PROTO(struct xfs_dqtrx *qtrx), \
	TP_ARGS(qtrx))
DEFINE_DQTRX_EVENT(xfs_trans_apply_dquot_deltas);
DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_before);
DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_after);
/*
 * Log grant tracing: snapshot a log ticket and the reserve/write grant
 * heads at interesting points in the reservation life cycle.
 */
DECLARE_EVENT_CLASS(xfs_loggrant_class,
	TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
	TP_ARGS(log, tic),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, tic)
		__field(char, ocnt)
		__field(char, cnt)
		__field(int, curr_res)
		__field(int, unit_res)
		__field(unsigned int, flags)
		__field(int, reserveq)
		__field(int, writeq)
		__field(uint64_t, grant_reserve_bytes)
		__field(uint64_t, grant_write_bytes)
		__field(uint64_t, tail_space)
		__field(int, curr_cycle)
		__field(int, curr_block)
		__field(xfs_lsn_t, tail_lsn)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		/* record only the ticket's address, for correlating events */
		__entry->tic = (unsigned long)tic;
		__entry->ocnt = tic->t_ocnt;
		__entry->cnt = tic->t_cnt;
		__entry->curr_res = tic->t_curr_res;
		__entry->unit_res = tic->t_unit_res;
		__entry->flags = tic->t_flags;
		/* nonzero means the corresponding wait queue is empty */
		__entry->reserveq = list_empty(&log->l_reserve_head.waiters);
		__entry->writeq = list_empty(&log->l_write_head.waiters);
		__entry->tail_space = READ_ONCE(log->l_tail_space);
		/* grant byte counts are reported relative to the log tail */
		__entry->grant_reserve_bytes = __entry->tail_space +
			atomic64_read(&log->l_reserve_head.grant);
		__entry->grant_write_bytes = __entry->tail_space +
			atomic64_read(&log->l_write_head.grant);
		__entry->curr_cycle = log->l_curr_cycle;
		__entry->curr_block = log->l_curr_block;
		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
	),
	TP_printk("dev %d:%d tic 0x%lx t_ocnt %u t_cnt %u t_curr_res %u "
		  "t_unit_res %u t_flags %s reserveq %s writeq %s "
		  "tail space %llu grant_reserve_bytes %llu "
		  "grant_write_bytes %llu curr_cycle %d curr_block %d "
		  "tail_cycle %d tail_block %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tic,
		  __entry->ocnt,
		  __entry->cnt,
		  __entry->curr_res,
		  __entry->unit_res,
		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
		  __entry->reserveq ? "empty" : "active",
		  __entry->writeq ? "empty" : "active",
		  __entry->tail_space,
		  __entry->grant_reserve_bytes,
		  __entry->grant_write_bytes,
		  __entry->curr_cycle,
		  __entry->curr_block,
		  /* decode the tail LSN into its cycle/block components */
		  CYCLE_LSN(__entry->tail_lsn),
		  BLOCK_LSN(__entry->tail_lsn)
	)
)
/* Instantiate log grant events covering reserve/regrant/ungrant paths. */
#define DEFINE_LOGGRANT_EVENT(name) \
DEFINE_EVENT(xfs_loggrant_class, name, \
	TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
	TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub);
DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait);
DEFINE_LOGGRANT_EVENT(xfs_log_cil_return);
/*
 * Log item tracing: capture a log item's identity (pointer, type, flags)
 * and its LSN, printed as cycle/block.
 */
DECLARE_EVENT_CLASS(xfs_log_item_class,
	TP_PROTO(struct xfs_log_item *lip),
	TP_ARGS(lip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(void *, lip)
		__field(uint, type)
		__field(unsigned long, flags)
		__field(xfs_lsn_t, lsn)
	),
	TP_fast_assign(
		__entry->dev = lip->li_log->l_mp->m_super->s_dev;
		__entry->lip = lip;
		__entry->type = lip->li_type;
		__entry->flags = lip->li_flags;
		__entry->lsn = lip->li_lsn;
	),
	TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lip,
		  CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)
/*
 * Trace a log force request: the target LSN and the address of the
 * caller that initiated it.
 */
TRACE_EVENT(xfs_log_force,
	TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip),
	TP_ARGS(mp, lsn, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_lsn_t, lsn)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->lsn = lsn;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d lsn 0x%llx caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lsn, (void *)__entry->caller_ip)
)
/* Instantiate log item events for AIL pushing and CIL whiteout handling. */
#define DEFINE_LOG_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_log_item_class, name, \
	TP_PROTO(struct xfs_log_item *lip), \
	TP_ARGS(lip))
DEFINE_LOG_ITEM_EVENT(xfs_ail_push);
DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned);
DEFINE_LOG_ITEM_EVENT(xfs_ail_locked);
DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip);
DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin);
/*
 * AIL movement tracing: like xfs_log_item_class but records both the old
 * and the new LSN of the item as it moves in the AIL.
 */
DECLARE_EVENT_CLASS(xfs_ail_class,
	TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn),
	TP_ARGS(lip, old_lsn, new_lsn),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(void *, lip)
		__field(uint, type)
		__field(unsigned long, flags)
		__field(xfs_lsn_t, old_lsn)
		__field(xfs_lsn_t, new_lsn)
	),
	TP_fast_assign(
		__entry->dev = lip->li_log->l_mp->m_super->s_dev;
		__entry->lip = lip;
		__entry->type = lip->li_type;
		__entry->flags = lip->li_flags;
		__entry->old_lsn = old_lsn;
		__entry->new_lsn = new_lsn;
	),
	TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lip,
		  CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
		  CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __print_flags(__entry->flags, "|", XFS_LI_FLAGS))
)
/* Instantiate AIL events for item insertion, movement and deletion. */
#define DEFINE_AIL_EVENT(name) \
DEFINE_EVENT(xfs_ail_class, name, \
	TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), \
	TP_ARGS(lip, old_lsn, new_lsn))
DEFINE_AIL_EVENT(xfs_ail_insert);
DEFINE_AIL_EVENT(xfs_ail_move);
DEFINE_AIL_EVENT(xfs_ail_delete);
/*
 * Trace assignment of a new log tail LSN, alongside the previous tail
 * and the current AIL head LSN for context.
 */
TRACE_EVENT(xfs_log_assign_tail_lsn,
	TP_PROTO(struct xlog *log, xfs_lsn_t new_lsn),
	TP_ARGS(log, new_lsn),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_lsn_t, new_lsn)
		__field(xfs_lsn_t, old_lsn)
		__field(xfs_lsn_t, head_lsn)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->new_lsn = new_lsn;
		/* the old tail is whatever is still published in the log */
		__entry->old_lsn = atomic64_read(&log->l_tail_lsn);
		__entry->head_lsn = log->l_ailp->ail_head_lsn;
	),
	TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, head lsn %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn),
		  CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
		  CYCLE_LSN(__entry->head_lsn), BLOCK_LSN(__entry->head_lsn))
)
/*
 * File I/O tracing: record the inode, its on-disk size, and the
 * position/length of the I/O described by the kiocb and iov_iter.
 */
DECLARE_EVENT_CLASS(xfs_file_class,
	TP_PROTO(struct kiocb *iocb, struct iov_iter *iter),
	TP_ARGS(iocb, iter),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(loff_t, offset)
		__field(size_t, count)
	),
	TP_fast_assign(
		__entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
		__entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino;
		__entry->size = XFS_I(file_inode(iocb->ki_filp))->i_disk_size;
		__entry->offset = iocb->ki_pos;
		__entry->count = iov_iter_count(iter);
	),
	TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->offset,
		  __entry->count)
)
/* Instantiate read/write events for the buffered, direct and DAX paths. */
#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
	TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), \
	TP_ARGS(iocb, iter))
DEFINE_RW_EVENT(xfs_file_buffered_read);
DEFINE_RW_EVENT(xfs_file_direct_read);
DEFINE_RW_EVENT(xfs_file_dax_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write);
/*
 * Block mapping tracing: record an I/O range on an inode fork together
 * with the bmap record (if any) that maps it.
 */
DECLARE_EVENT_CLASS(xfs_imap_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
		 int whichfork, struct xfs_bmbt_irec *irec),
	TP_ARGS(ip, offset, count, whichfork, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, size)
		__field(loff_t, offset)
		__field(size_t, count)
		__field(int, whichfork)
		__field(xfs_fileoff_t, startoff)
		__field(xfs_fsblock_t, startblock)
		__field(xfs_filblks_t, blockcount)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_disk_size;
		__entry->offset = offset;
		__entry->count = count;
		__entry->whichfork = whichfork;
		/* irec may be NULL; record zeroes in that case */
		__entry->startoff = irec ? irec->br_startoff : 0;
		__entry->startblock = irec ? irec->br_startblock : 0;
		__entry->blockcount = irec ? irec->br_blockcount : 0;
	),
	TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx "
		  "fork %s startoff 0x%llx startblock 0x%llx fsbcount 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->offset,
		  __entry->count,
		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
		  __entry->startoff,
		  (int64_t)__entry->startblock,
		  __entry->blockcount)
)
/* Instantiate block mapping events for writeback and iomap lookups. */
#define DEFINE_IMAP_EVENT(name) \
DEFINE_EVENT(xfs_imap_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
		 int whichfork, struct xfs_bmbt_irec *irec), \
	TP_ARGS(ip, offset, count, whichfork, irec))
DEFINE_IMAP_EVENT(xfs_map_blocks_found);
DEFINE_IMAP_EVENT(xfs_map_blocks_alloc);
DEFINE_IMAP_EVENT(xfs_iomap_alloc);
DEFINE_IMAP_EVENT(xfs_iomap_found);
/*
 * Simple I/O tracing: record an offset/count pair on an inode along with
 * both the VFS (in-core) and on-disk file sizes.
 */
DECLARE_EVENT_CLASS(xfs_simple_io_class,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
	TP_ARGS(ip, offset, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(loff_t, isize)
		__field(loff_t, disize)
		__field(loff_t, offset)
		__field(size_t, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->isize = VFS_I(ip)->i_size;
		__entry->disize = ip->i_disk_size;
		__entry->offset = offset;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
		  "pos 0x%llx bytecount 0x%zx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->isize,
		  __entry->disize,
		  __entry->offset,
		  __entry->count)
);
/* Instantiate simple I/O events (delalloc, conversion, EOF handling, ...). */
#define DEFINE_SIMPLE_IO_EVENT(name) \
DEFINE_EVENT(xfs_simple_io_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
	TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten);
DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append);
DEFINE_SIMPLE_IO_EVENT(xfs_file_splice_read);
/*
 * Truncate tracing: record the inode's current on-disk size and the new
 * size being truncated to.
 */
DECLARE_EVENT_CLASS(xfs_itrunc_class,
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
	TP_ARGS(ip, new_size),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fsize_t, new_size)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_disk_size;
		__entry->new_size = new_size;
	),
	TP_printk("dev %d:%d ino 0x%llx disize 0x%llx new_size 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->new_size)
)
/* Instantiate truncate events bracketing xfs_itruncate_extents(). */
#define DEFINE_ITRUNC_EVENT(name) \
DEFINE_EVENT(xfs_itrunc_class, name, \
	TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
	TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
/*
 * Trace page cache invalidation over the byte range [start, finish] of
 * an inode.
 */
TRACE_EVENT(xfs_pagecache_inval,
	TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
	TP_ARGS(ip, start, finish),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_off_t, start)
		__field(xfs_off_t, finish)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_disk_size;
		__entry->start = start;
		__entry->finish = finish;
	),
	TP_printk("dev %d:%d ino 0x%llx disize 0x%llx start 0x%llx finish 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->start,
		  __entry->finish)
);
/*
 * Trace an extent unmap request: file range, bmapi flags and the caller
 * that initiated it.
 */
TRACE_EVENT(xfs_bunmap,
	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t fileoff, xfs_filblks_t len,
		 int flags, unsigned long caller_ip),
	TP_ARGS(ip, fileoff, len, flags, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fsize_t, size)
		__field(xfs_fileoff_t, fileoff)
		__field(xfs_filblks_t, len)
		__field(unsigned long, caller_ip)
		__field(int, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->size = ip->i_disk_size;
		__entry->fileoff = fileoff;
		__entry->len = len;
		__entry->caller_ip = caller_ip;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx disize 0x%llx fileoff 0x%llx fsbcount 0x%llx "
		  "flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fileoff,
		  __entry->len,
		  __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
		  (void *)__entry->caller_ip)
);
/*
 * Busy extent tracing: identify an extent by AG number, AG block and
 * length.
 */
DECLARE_EVENT_CLASS(xfs_extent_busy_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len),
	TP_ARGS(mp, agno, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
);
/* Instantiate busy extent events covering the busy extent life cycle. */
#define DEFINE_BUSY_EVENT(name) \
DEFINE_EVENT(xfs_extent_busy_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
		 xfs_agblock_t agbno, xfs_extlen_t len), \
	TP_ARGS(mp, agno, agbno, len))
DEFINE_BUSY_EVENT(xfs_extent_busy);
DEFINE_BUSY_EVENT(xfs_extent_busy_force);
DEFINE_BUSY_EVENT(xfs_extent_busy_reuse);
DEFINE_BUSY_EVENT(xfs_extent_busy_clear);
/*
 * Trace trimming of a candidate extent around busy extents: the input
 * range (agbno/len) and the usable range found (tbno/tlen).
 */
TRACE_EVENT(xfs_extent_busy_trim,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len,
		 xfs_agblock_t tbno, xfs_extlen_t tlen),
	TP_ARGS(mp, agno, agbno, len, tbno, tlen),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(xfs_agblock_t, tbno)
		__field(xfs_extlen_t, tlen)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->tbno = tbno;
		__entry->tlen = tlen;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->tbno,
		  __entry->tlen)
);
  1611. DECLARE_EVENT_CLASS(xfs_agf_class,
  1612. TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
  1613. unsigned long caller_ip),
  1614. TP_ARGS(mp, agf, flags, caller_ip),
  1615. TP_STRUCT__entry(
  1616. __field(dev_t, dev)
  1617. __field(xfs_agnumber_t, agno)
  1618. __field(int, flags)
  1619. __field(__u32, length)
  1620. __field(__u32, bno_root)
  1621. __field(__u32, cnt_root)
  1622. __field(__u32, bno_level)
  1623. __field(__u32, cnt_level)
  1624. __field(__u32, flfirst)
  1625. __field(__u32, fllast)
  1626. __field(__u32, flcount)
  1627. __field(__u32, freeblks)
  1628. __field(__u32, longest)
  1629. __field(unsigned long, caller_ip)
  1630. ),
  1631. TP_fast_assign(
  1632. __entry->dev = mp->m_super->s_dev;
  1633. __entry->agno = be32_to_cpu(agf->agf_seqno),
  1634. __entry->flags = flags;
  1635. __entry->length = be32_to_cpu(agf->agf_length),
  1636. __entry->bno_root = be32_to_cpu(agf->agf_bno_root),
  1637. __entry->cnt_root = be32_to_cpu(agf->agf_cnt_root),
  1638. __entry->bno_level = be32_to_cpu(agf->agf_bno_level),
  1639. __entry->cnt_level = be32_to_cpu(agf->agf_cnt_level),
  1640. __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
  1641. __entry->fllast = be32_to_cpu(agf->agf_fllast),
  1642. __entry->flcount = be32_to_cpu(agf->agf_flcount),
  1643. __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
  1644. __entry->longest = be32_to_cpu(agf->agf_longest);
  1645. __entry->caller_ip = caller_ip;
  1646. ),
  1647. TP_printk("dev %d:%d agno 0x%x flags %s length %u roots b %u c %u "
  1648. "levels b %u c %u flfirst %u fllast %u flcount %u "
  1649. "freeblks %u longest %u caller %pS",
  1650. MAJOR(__entry->dev), MINOR(__entry->dev),
  1651. __entry->agno,
  1652. __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
  1653. __entry->length,
  1654. __entry->bno_root,
  1655. __entry->cnt_root,
  1656. __entry->bno_level,
  1657. __entry->cnt_level,
  1658. __entry->flfirst,
  1659. __entry->fllast,
  1660. __entry->flcount,
  1661. __entry->freeblks,
  1662. __entry->longest,
  1663. (void *)__entry->caller_ip)
  1664. );
/* Instantiate AGF events for AGF logging and AGFL reset. */
#define DEFINE_AGF_EVENT(name) \
DEFINE_EVENT(xfs_agf_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agf, flags, caller_ip))
DEFINE_AGF_EVENT(xfs_agf);
DEFINE_AGF_EVENT(xfs_agfl_reset);
/*
 * Trace freeing an extent: its AG location/length, the reservation type,
 * and whether it merged with a free neighbor on the left and/or right.
 */
TRACE_EVENT(xfs_free_extent,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
		 xfs_extlen_t len, enum xfs_ag_resv_type resv, int haveleft,
		 int haveright),
	TP_ARGS(mp, agno, agbno, len, resv, haveleft, haveright),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(int, resv)
		__field(int, haveleft)
		__field(int, haveright)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->resv = resv;
		__entry->haveleft = haveleft;
		__entry->haveright = haveright;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x resv %d %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->resv,
		  /* report which free neighbors the extent merged with */
		  __entry->haveleft ?
			(__entry->haveright ? "both" : "left") :
			(__entry->haveright ? "right" : "none"))
);
/*
 * Extent allocation tracing: dump the full xfs_alloc_arg so that
 * allocator decisions can be reconstructed from the trace.
 */
DECLARE_EVENT_CLASS(xfs_alloc_class,
	TP_PROTO(struct xfs_alloc_arg *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, minlen)
		__field(xfs_extlen_t, maxlen)
		__field(xfs_extlen_t, mod)
		__field(xfs_extlen_t, prod)
		__field(xfs_extlen_t, minleft)
		__field(xfs_extlen_t, total)
		__field(xfs_extlen_t, alignment)
		__field(xfs_extlen_t, minalignslop)
		__field(xfs_extlen_t, len)
		__field(char, wasdel)
		__field(char, wasfromfl)
		__field(int, resv)
		__field(int, datatype)
		__field(xfs_agnumber_t, highest_agno)
	),
	TP_fast_assign(
		__entry->dev = args->mp->m_super->s_dev;
		__entry->agno = args->agno;
		__entry->agbno = args->agbno;
		__entry->minlen = args->minlen;
		__entry->maxlen = args->maxlen;
		__entry->mod = args->mod;
		__entry->prod = args->prod;
		__entry->minleft = args->minleft;
		__entry->total = args->total;
		__entry->alignment = args->alignment;
		__entry->minalignslop = args->minalignslop;
		__entry->len = args->len;
		__entry->wasdel = args->wasdel;
		__entry->wasfromfl = args->wasfromfl;
		__entry->resv = args->resv;
		__entry->datatype = args->datatype;
		/* highest AG this transaction has locked so far */
		__entry->highest_agno = args->tp->t_highest_agno;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x minlen %u maxlen %u mod %u "
		  "prod %u minleft %u total %u alignment %u minalignslop %u "
		  "len %u wasdel %d wasfromfl %d resv %d "
		  "datatype 0x%x highest_agno 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->minlen,
		  __entry->maxlen,
		  __entry->mod,
		  __entry->prod,
		  __entry->minleft,
		  __entry->total,
		  __entry->alignment,
		  __entry->minalignslop,
		  __entry->len,
		  __entry->wasdel,
		  __entry->wasfromfl,
		  __entry->resv,
		  __entry->datatype,
		  __entry->highest_agno)
)
/*
 * Instantiate allocator events covering the exact/near/size/small
 * allocation strategies and the xfs_alloc_vextent* entry points.
 */
#define DEFINE_ALLOC_EVENT(name) \
DEFINE_EVENT(xfs_alloc_class, name, \
	TP_PROTO(struct xfs_alloc_arg *args), \
	TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
DEFINE_ALLOC_EVENT(xfs_alloc_cur);
DEFINE_ALLOC_EVENT(xfs_alloc_cur_right);
DEFINE_ALLOC_EVENT(xfs_alloc_cur_left);
DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup);
DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
DEFINE_ALLOC_EVENT(xfs_alloc_size_busy);
DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_skip_deadlock);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_this_ag);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_start_ag);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_first_ag);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_exact_bno);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_near_bno);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_finish);
/*
 * Trace evaluation of an allocation candidate found via a btree cursor:
 * the extent, its "diff" score, and whether it became the new best.
 */
TRACE_EVENT(xfs_alloc_cur_check,
	TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t bno,
		 xfs_extlen_t len, xfs_extlen_t diff, bool new),
	TP_ARGS(cur, bno, len, diff, new),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(xfs_agblock_t, bno)
		__field(xfs_extlen_t, len)
		__field(xfs_extlen_t, diff)
		__field(bool, new)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		/* capture the btree type name from the cursor ops */
		__assign_str(name);
		__entry->bno = bno;
		__entry->len = len;
		__entry->diff = diff;
		__entry->new = new;
	),
	TP_printk("dev %d:%d %sbt agbno 0x%x fsbcount 0x%x diff 0x%x new %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->bno, __entry->len, __entry->diff, __entry->new)
)
/*
 * Directory/attr (da) operation tracing: record the name being operated
 * on, its hash, the target inode number and the operation flags.
 */
DECLARE_EVENT_CLASS(xfs_da_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(xfs_dahash_t, hashval)
		__field(xfs_ino_t, inumber)
		__field(uint32_t, op_flags)
		__field(xfs_ino_t, owner)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		/* the name is not NUL terminated; copy exactly namelen bytes */
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->hashval = args->hashval;
		__entry->inumber = args->inumber;
		__entry->op_flags = args->op_flags;
		__entry->owner = args->owner;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
		  "inumber 0x%llx op_flags %s owner 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->hashval,
		  __entry->inumber,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->owner)
)
/*
 * Instantiate directory events covering the shortform, block, leaf and
 * node directory formats and the conversions between them.
 */
#define DEFINE_DIR2_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
/*
 * Extended attribute operation tracing: like xfs_da_class but also
 * records the value length and the attribute namespace filter.
 */
DECLARE_EVENT_CLASS(xfs_attr_class,
	TP_PROTO(struct xfs_da_args *args),
	TP_ARGS(args),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__dynamic_array(char, name, args->namelen)
		__field(int, namelen)
		__field(int, valuelen)
		__field(xfs_dahash_t, hashval)
		__field(unsigned int, attr_filter)
		__field(uint32_t, op_flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		/* the name is not NUL terminated; copy exactly namelen bytes */
		if (args->namelen)
			memcpy(__get_str(name), args->name, args->namelen);
		__entry->namelen = args->namelen;
		__entry->valuelen = args->valuelen;
		__entry->hashval = args->hashval;
		__entry->attr_filter = args->attr_filter;
		__entry->op_flags = args->op_flags;
	),
	TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d "
		  "hashval 0x%x filter %s op_flags %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->namelen,
		  __entry->namelen ? __get_str(name) : NULL,
		  __entry->namelen,
		  __entry->valuelen,
		  __entry->hashval,
		  __print_flags(__entry->attr_filter, "|",
				XFS_ATTR_FILTER_FLAGS),
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
)
/*
 * Instantiate attribute events covering the shortform, leaf, node and
 * remote-value attribute code paths.
 */
#define DEFINE_ATTR_EVENT(name) \
DEFINE_EVENT(xfs_attr_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_ATTR_EVENT(xfs_attr_sf_add);
DEFINE_ATTR_EVENT(xfs_attr_sf_addname);
DEFINE_ATTR_EVENT(xfs_attr_sf_create);
DEFINE_ATTR_EVENT(xfs_attr_sf_lookup);
DEFINE_ATTR_EVENT(xfs_attr_sf_remove);
DEFINE_ATTR_EVENT(xfs_attr_sf_to_leaf);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new);
DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work);
DEFINE_ATTR_EVENT(xfs_attr_leaf_create);
DEFINE_ATTR_EVENT(xfs_attr_leaf_compact);
DEFINE_ATTR_EVENT(xfs_attr_leaf_get);
DEFINE_ATTR_EVENT(xfs_attr_leaf_lookup);
DEFINE_ATTR_EVENT(xfs_attr_leaf_replace);
DEFINE_ATTR_EVENT(xfs_attr_leaf_remove);
DEFINE_ATTR_EVENT(xfs_attr_leaf_removename);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split_before);
DEFINE_ATTR_EVENT(xfs_attr_leaf_split_after);
DEFINE_ATTR_EVENT(xfs_attr_leaf_clearflag);
DEFINE_ATTR_EVENT(xfs_attr_leaf_setflag);
DEFINE_ATTR_EVENT(xfs_attr_leaf_flipflags);
DEFINE_ATTR_EVENT(xfs_attr_leaf_to_sf);
DEFINE_ATTR_EVENT(xfs_attr_leaf_to_node);
DEFINE_ATTR_EVENT(xfs_attr_leaf_rebalance);
DEFINE_ATTR_EVENT(xfs_attr_leaf_unbalance);
DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall);
DEFINE_ATTR_EVENT(xfs_attr_node_addname);
DEFINE_ATTR_EVENT(xfs_attr_node_get);
DEFINE_ATTR_EVENT(xfs_attr_node_replace);
DEFINE_ATTR_EVENT(xfs_attr_node_removename);
DEFINE_ATTR_EVENT(xfs_attr_fillstate);
DEFINE_ATTR_EVENT(xfs_attr_refillstate);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_get);
DEFINE_ATTR_EVENT(xfs_attr_rmtval_set);
/*
 * Instantiate one tracepoint per dir/attr btree (da-btree) operation;
 * all take the xfs_da_args for the operation and share xfs_da_class.
 */
#define DEFINE_DA_EVENT(name) \
DEFINE_EVENT(xfs_da_class, name, \
	TP_PROTO(struct xfs_da_args *args), \
	TP_ARGS(args))
DEFINE_DA_EVENT(xfs_da_split);
DEFINE_DA_EVENT(xfs_da_join);
DEFINE_DA_EVENT(xfs_da_link_before);
DEFINE_DA_EVENT(xfs_da_link_after);
DEFINE_DA_EVENT(xfs_da_unlink_back);
DEFINE_DA_EVENT(xfs_da_unlink_forward);
DEFINE_DA_EVENT(xfs_da_root_split);
DEFINE_DA_EVENT(xfs_da_root_join);
DEFINE_DA_EVENT(xfs_da_node_add);
DEFINE_DA_EVENT(xfs_da_node_create);
DEFINE_DA_EVENT(xfs_da_node_split);
DEFINE_DA_EVENT(xfs_da_node_remove);
DEFINE_DA_EVENT(xfs_da_node_rebalance);
DEFINE_DA_EVENT(xfs_da_node_unbalance);
DEFINE_DA_EVENT(xfs_da_node_toosmall);
DEFINE_DA_EVENT(xfs_da_swap_lastblock);
DEFINE_DA_EVENT(xfs_da_grow_inode);
DEFINE_DA_EVENT(xfs_da_shrink_inode);
DEFINE_DA_EVENT(xfs_da_fixhashpath);
DEFINE_DA_EVENT(xfs_da_path_shift);
/*
 * Event class for directory space management operations that act on a
 * single block/leaf index within the directory identified by args->dp.
 */
DECLARE_EVENT_CLASS(xfs_dir2_space_class,
	TP_PROTO(struct xfs_da_args *args, int idx),
	TP_ARGS(args, idx),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(uint32_t, op_flags)
		__field(int, idx)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->idx = idx;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->idx)
)
/* Tracepoints for directory space add/remove/grow/shrink operations. */
#define DEFINE_DIR2_SPACE_EVENT(name) \
DEFINE_EVENT(xfs_dir2_space_class, name, \
	TP_PROTO(struct xfs_da_args *args, int idx), \
	TP_ARGS(args, idx))
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
/*
 * Fires when directory leaf entries are moved between leaf blocks
 * during a rebalance; records the source/destination indices and the
 * number of entries moved.
 */
TRACE_EVENT(xfs_dir2_leafn_moveents,
	TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
	TP_ARGS(args, src_idx, dst_idx, count),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(uint32_t, op_flags)
		__field(int, src_idx)
		__field(int, dst_idx)
		__field(int, count)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(args->dp)->i_sb->s_dev;
		__entry->ino = args->dp->i_ino;
		__entry->op_flags = args->op_flags;
		__entry->src_idx = src_idx;
		__entry->dst_idx = dst_idx;
		__entry->count = count;
	),
	TP_printk("dev %d:%d ino 0x%llx op_flags %s "
		  "src_idx %d dst_idx %d count %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
		  __entry->src_idx,
		  __entry->dst_idx,
		  __entry->count)
);
/* Symbolic names for the "which" argument of the swapext events. */
#define XFS_SWAPEXT_INODES \
	{ 0,	"target" }, \
	{ 1,	"temp" }

/* Export the inode fork format values so user tooling can decode them. */
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_DEV);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE);
TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID);
/*
 * Event class capturing the data fork state of one of the two inodes
 * involved in an extent swap; "which" says whether this is the target
 * or the temporary inode (see XFS_SWAPEXT_INODES).
 */
DECLARE_EVENT_CLASS(xfs_swap_extent_class,
	TP_PROTO(struct xfs_inode *ip, int which),
	TP_ARGS(ip, which),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, which)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(xfs_extnum_t, nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->which = which;
		__entry->ino = ip->i_ino;
		/* snapshot of the data fork only (i_df) */
		__entry->format = ip->i_df.if_format;
		__entry->nex = ip->i_df.if_nextents;
		__entry->broot_size = ip->i_df.if_broot_bytes;
		__entry->fork_off = xfs_inode_fork_boff(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %llu, "
		  "broot size %d, forkoff 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->broot_size,
		  __entry->fork_off)
)
/* before/after snapshots around the extent swap itself */
#define DEFINE_SWAPEXT_EVENT(name) \
DEFINE_EVENT(xfs_swap_extent_class, name, \
	TP_PROTO(struct xfs_inode *ip, int which), \
	TP_ARGS(ip, which))
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
/*
 * Fires at the start of log recovery with the discovered head and tail
 * log block numbers.
 */
TRACE_EVENT(xfs_log_recover,
	TP_PROTO(struct xlog *log, xfs_daddr_t headblk, xfs_daddr_t tailblk),
	TP_ARGS(log, headblk, tailblk),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_daddr_t, headblk)
		__field(xfs_daddr_t, tailblk)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->headblk = headblk;
		__entry->tailblk = tailblk;
	),
	TP_printk("dev %d:%d headblk 0x%llx tailblk 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->headblk,
		  __entry->tailblk)
)
/*
 * Fires once per log record processed during recovery; decodes the
 * on-disk (big-endian) record header fields for the trace entry.
 */
TRACE_EVENT(xfs_log_recover_record,
	TP_PROTO(struct xlog *log, struct xlog_rec_header *rhead, int pass),
	TP_ARGS(log, rhead, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_lsn_t, lsn)
		__field(int, len)
		__field(int, num_logops)
		__field(int, pass)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->lsn = be64_to_cpu(rhead->h_lsn);
		__entry->len = be32_to_cpu(rhead->h_len);
		__entry->num_logops = be32_to_cpu(rhead->h_num_logops);
		__entry->pass = pass;
	),
	TP_printk("dev %d:%d lsn 0x%llx len 0x%x num_logops 0x%x pass %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->lsn, __entry->len, __entry->num_logops,
		  __entry->pass)
)
/*
 * Event class for per-item log recovery processing; records the
 * transaction identity, the item pointer/type and the item's region
 * progress (count of regions seen so far vs. total expected).
 */
DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
	TP_PROTO(struct xlog *log, struct xlog_recover *trans,
		struct xlog_recover_item *item, int pass),
	TP_ARGS(log, trans, item, pass),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, item)
		__field(xlog_tid_t, tid)
		__field(xfs_lsn_t, lsn)
		__field(int, type)
		__field(int, pass)
		__field(int, count)
		__field(int, total)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		/* store the pointer as an integer so it survives the buffer */
		__entry->item = (unsigned long)item;
		__entry->tid = trans->r_log_tid;
		__entry->lsn = trans->r_lsn;
		__entry->type = ITEM_TYPE(item);
		__entry->pass = pass;
		__entry->count = item->ri_cnt;
		__entry->total = item->ri_total;
	),
	TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item %p, "
		  "item type %s item region count/total %d/%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->lsn,
		  __entry->pass,
		  (void *)__entry->item,
		  __print_symbolic(__entry->type, XFS_LI_TYPE_DESC),
		  __entry->count,
		  __entry->total)
)
/* item add/reorder/recover stages of log recovery */
#define DEFINE_LOG_RECOVER_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_item_class, name, \
	TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
		struct xlog_recover_item *item, int pass), \
	TP_ARGS(log, trans, item, pass))
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
/*
 * Event class for buffer log format items encountered during recovery;
 * dumps the logged buffer's location, length and dirty-map metadata.
 */
DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
	TP_ARGS(log, buf_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int64_t, blkno)
		__field(unsigned short, len)
		__field(unsigned short, flags)
		__field(unsigned short, size)
		__field(unsigned int, map_size)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->blkno = buf_f->blf_blkno;
		__entry->len = buf_f->blf_len;
		__entry->flags = buf_f->blf_flags;
		__entry->size = buf_f->blf_size;
		__entry->map_size = buf_f->blf_map_size;
	),
	TP_printk("dev %d:%d daddr 0x%llx, bbcount 0x%x, flags 0x%x, size %d, "
		  "map_size %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->blkno,
		  __entry->len,
		  __entry->flags,
		  __entry->size,
		  __entry->map_size)
)
/* dispositions of a logged buffer during recovery */
#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
	TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
	TP_ARGS(log, buf_f))
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_skip);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
/*
 * Event class for inode log format items during recovery; records the
 * inode number, logged field mask, fork sizes and the location of the
 * inode within its backing buffer.
 */
DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, size)
		__field(int, fields)
		__field(unsigned short, asize)
		__field(unsigned short, dsize)
		__field(int64_t, blkno)
		__field(int, len)
		__field(int, boffset)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->ino = in_f->ilf_ino;
		__entry->size = in_f->ilf_size;
		__entry->fields = in_f->ilf_fields;
		__entry->asize = in_f->ilf_asize;
		__entry->dsize = in_f->ilf_dsize;
		__entry->blkno = in_f->ilf_blkno;
		__entry->len = in_f->ilf_len;
		__entry->boffset = in_f->ilf_boffset;
	),
	TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, "
		  "dsize %d, daddr 0x%llx, bbcount 0x%x, boffset %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->size,
		  __entry->fields,
		  __entry->asize,
		  __entry->dsize,
		  __entry->blkno,
		  __entry->len,
		  __entry->boffset)
)
/* dispositions of a logged inode during recovery */
#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
	TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
/*
 * Event class for inode-create (icreate) log items during recovery;
 * the on-disk item fields are big-endian and are decoded here.
 */
DECLARE_EVENT_CLASS(xfs_log_recover_icreate_item_class,
	TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f),
	TP_ARGS(log, in_f),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(unsigned int, count)
		__field(unsigned int, isize)
		__field(xfs_agblock_t, length)
		__field(unsigned int, gen)
	),
	TP_fast_assign(
		__entry->dev = log->l_mp->m_super->s_dev;
		__entry->agno = be32_to_cpu(in_f->icl_ag);
		__entry->agbno = be32_to_cpu(in_f->icl_agbno);
		__entry->count = be32_to_cpu(in_f->icl_count);
		__entry->isize = be32_to_cpu(in_f->icl_isize);
		__entry->length = be32_to_cpu(in_f->icl_length);
		__entry->gen = be32_to_cpu(in_f->icl_gen);
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x ireccount %u isize %u gen 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->length,
		  __entry->count,
		  __entry->isize,
		  __entry->gen)
)
#define DEFINE_LOG_RECOVER_ICREATE_ITEM(name) \
DEFINE_EVENT(xfs_log_recover_icreate_item_class, name, \
	TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f), \
	TP_ARGS(log, in_f))
DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_cancel);
DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_recover);
/*
 * Event class for online discard (FITRIM) decisions on data-device
 * extents, keyed by AG number and AG block.
 */
DECLARE_EVENT_CLASS(xfs_discard_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agblock_t agbno, xfs_extlen_t len),
	TP_ARGS(mp, agno, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
)
#define DEFINE_DISCARD_EVENT(name) \
DEFINE_EVENT(xfs_discard_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
		 xfs_agblock_t agbno, xfs_extlen_t len), \
	TP_ARGS(mp, agno, agbno, len))
DEFINE_DISCARD_EVENT(xfs_discard_extent);
DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
/*
 * Event class for discard decisions on the realtime device; note that
 * the dev field here is the realtime device, not the data device.
 */
DECLARE_EVENT_CLASS(xfs_rtdiscard_class,
	TP_PROTO(struct xfs_mount *mp,
		 xfs_rtblock_t rtbno, xfs_rtblock_t len),
	TP_ARGS(mp, rtbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_rtblock_t, rtbno)
		__field(xfs_rtblock_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_rtdev_targp->bt_dev;
		__entry->rtbno = rtbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d rtbno 0x%llx rtbcount 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rtbno,
		  __entry->len)
)
#define DEFINE_RTDISCARD_EVENT(name) \
DEFINE_EVENT(xfs_rtdiscard_class, name, \
	TP_PROTO(struct xfs_mount *mp, \
		 xfs_rtblock_t rtbno, xfs_rtblock_t len), \
	TP_ARGS(mp, rtbno, len))
DEFINE_RTDISCARD_EVENT(xfs_discard_rtextent);
DEFINE_RTDISCARD_EVENT(xfs_discard_rttoosmall);
DEFINE_RTDISCARD_EVENT(xfs_discard_rtrelax);
/*
 * Event class for generic btree cursor operations; records the btree
 * name, the cursor position (level/ptr) and the disk address of the
 * buffer being operated on.
 */
DECLARE_EVENT_CLASS(xfs_btree_cur_class,
	TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
	TP_ARGS(cur, level, bp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(int, level)
		__field(int, nlevels)
		__field(int, ptr)
		__field(xfs_daddr_t, daddr)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__assign_str(name);
		__entry->level = level;
		__entry->nlevels = cur->bc_nlevels;
		__entry->ptr = cur->bc_levels[level].ptr;
		/* -1 means there is no buffer for this level */
		__entry->daddr = bp ? xfs_buf_daddr(bp) : -1;
	),
	TP_printk("dev %d:%d %sbt level %d/%d ptr %d daddr 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->level,
		  __entry->nlevels,
		  __entry->ptr,
		  (unsigned long long)__entry->daddr)
)
#define DEFINE_BTREE_CUR_EVENT(name) \
DEFINE_EVENT(xfs_btree_cur_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \
	TP_ARGS(cur, level, bp))
DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
/*
 * Fires when a new block is allocated for a btree.  The owner identity
 * depends on the btree type: inode-rooted btrees record the inode
 * number, AG btrees the AG number, and in-memory btrees neither.  On a
 * successful allocation the AG coordinates are recomputed from the
 * returned block pointer; on failure agbno is set to NULLAGBLOCK.
 */
TRACE_EVENT(xfs_btree_alloc_block,
	TP_PROTO(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int stat,
		 int error),
	TP_ARGS(cur, ptr, stat, error),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_ino_t, ino)
		__string(name, cur->bc_ops->name)
		__field(int, error)
		__field(xfs_agblock_t, agbno)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		switch (cur->bc_ops->type) {
		case XFS_BTREE_TYPE_INODE:
			__entry->agno = 0;
			__entry->ino = cur->bc_ino.ip->i_ino;
			break;
		case XFS_BTREE_TYPE_AG:
			__entry->agno = cur->bc_ag.pag->pag_agno;
			__entry->ino = 0;
			break;
		case XFS_BTREE_TYPE_MEM:
			__entry->agno = 0;
			__entry->ino = 0;
			break;
		}
		__assign_str(name);
		__entry->error = error;
		if (!error && stat) {
			/* long pointers hold a full fsblock; split it up */
			if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
				xfs_fsblock_t fsb = be64_to_cpu(ptr->l);

				__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp,
								fsb);
				__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp,
								fsb);
			} else {
				__entry->agbno = be32_to_cpu(ptr->s);
			}
		} else {
			__entry->agbno = NULLAGBLOCK;
		}
	),
	TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x error %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->agno,
		  __entry->ino,
		  __entry->agbno,
		  __entry->error)
);
/*
 * Fires when a btree block is freed; the AG coordinates are derived
 * from the buffer's disk address, and inode-rooted btrees additionally
 * record the owning inode number.
 */
TRACE_EVENT(xfs_btree_free_block,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_buf *bp),
	TP_ARGS(cur, bp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_ino_t, ino)
		__string(name, cur->bc_ops->name)
		__field(xfs_agblock_t, agbno)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = xfs_daddr_to_agno(cur->bc_mp,
							xfs_buf_daddr(bp));
		if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
			__entry->ino = cur->bc_ino.ip->i_ino;
		else
			__entry->ino = 0;
		__assign_str(name);
		__entry->agbno = xfs_daddr_to_agbno(cur->bc_mp,
							xfs_buf_daddr(bp));
	),
	TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->agno,
		  __entry->ino,
		  __entry->agbno)
);
  2477. /* deferred ops */
  2478. struct xfs_defer_pending;
  2479. DECLARE_EVENT_CLASS(xfs_defer_class,
  2480. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
  2481. TP_ARGS(tp, caller_ip),
  2482. TP_STRUCT__entry(
  2483. __field(dev_t, dev)
  2484. __field(struct xfs_trans *, tp)
  2485. __field(char, committed)
  2486. __field(unsigned long, caller_ip)
  2487. ),
  2488. TP_fast_assign(
  2489. __entry->dev = tp->t_mountp->m_super->s_dev;
  2490. __entry->tp = tp;
  2491. __entry->caller_ip = caller_ip;
  2492. ),
  2493. TP_printk("dev %d:%d tp %p caller %pS",
  2494. MAJOR(__entry->dev), MINOR(__entry->dev),
  2495. __entry->tp,
  2496. (char *)__entry->caller_ip)
  2497. )
  2498. #define DEFINE_DEFER_EVENT(name) \
  2499. DEFINE_EVENT(xfs_defer_class, name, \
  2500. TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
  2501. TP_ARGS(tp, caller_ip))
  2502. DECLARE_EVENT_CLASS(xfs_defer_error_class,
  2503. TP_PROTO(struct xfs_trans *tp, int error),
  2504. TP_ARGS(tp, error),
  2505. TP_STRUCT__entry(
  2506. __field(dev_t, dev)
  2507. __field(struct xfs_trans *, tp)
  2508. __field(char, committed)
  2509. __field(int, error)
  2510. ),
  2511. TP_fast_assign(
  2512. __entry->dev = tp->t_mountp->m_super->s_dev;
  2513. __entry->tp = tp;
  2514. __entry->error = error;
  2515. ),
  2516. TP_printk("dev %d:%d tp %p err %d",
  2517. MAJOR(__entry->dev), MINOR(__entry->dev),
  2518. __entry->tp,
  2519. __entry->error)
  2520. )
  2521. #define DEFINE_DEFER_ERROR_EVENT(name) \
  2522. DEFINE_EVENT(xfs_defer_error_class, name, \
  2523. TP_PROTO(struct xfs_trans *tp, int error), \
  2524. TP_ARGS(tp, error))
/*
 * Event class for a single pending deferred work item; records the
 * op type name, the intent item pointer, the pending flags, whether
 * the intent has been committed (dfp_done set) and the work count.
 */
DECLARE_EVENT_CLASS(xfs_defer_pending_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
	TP_ARGS(mp, dfp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, dfp->dfp_ops->name)
		__field(void *, intent)
		__field(unsigned int, flags)
		__field(char, committed)
		__field(int, nr)
	),
	TP_fast_assign(
		/* mp may be NULL for freshly recovered intents */
		__entry->dev = mp ? mp->m_super->s_dev : 0;
		__assign_str(name);
		__entry->intent = dfp->dfp_intent;
		__entry->flags = dfp->dfp_flags;
		__entry->committed = dfp->dfp_done != NULL;
		__entry->nr = dfp->dfp_count;
	),
	TP_printk("dev %d:%d optype %s intent %p flags %s committed %d nr %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->intent,
		  __print_flags(__entry->flags, "|", XFS_DEFER_PENDING_STRINGS),
		  __entry->committed,
		  __entry->nr)
)
#define DEFINE_DEFER_PENDING_EVENT(name) \
DEFINE_EVENT(xfs_defer_pending_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), \
	TP_ARGS(mp, dfp))
/* transaction-level deferred-op lifecycle events */
DEFINE_DEFER_EVENT(xfs_defer_cancel);
DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
DEFINE_DEFER_EVENT(xfs_defer_finish);
DEFINE_DEFER_EVENT(xfs_defer_finish_done);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
/* per-pending-item lifecycle events */
DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_relog_intent);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_isolate_paused);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_pause);
DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_unpause);
/*
 * Event class for deferred extent-free work items; converts the item's
 * fsblock start address into AG coordinates for the trace record.
 */
DECLARE_EVENT_CLASS(xfs_free_extent_deferred_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free),
	TP_ARGS(mp, free),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = XFS_FSB_TO_AGNO(mp, free->xefi_startblock);
		__entry->agbno = XFS_FSB_TO_AGBNO(mp, free->xefi_startblock);
		__entry->len = free->xefi_blockcount;
		__entry->flags = free->xefi_flags;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->flags)
);
#define DEFINE_FREE_EXTENT_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_free_extent_deferred_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free), \
	TP_ARGS(mp, free))
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_agfl_free_defer);
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_agfl_free_deferred);
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_defer);
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_deferred);
/*
 * Event class like xfs_defer_pending_class but also identifying one
 * specific work item attached to the pending deferred operation.
 */
DECLARE_EVENT_CLASS(xfs_defer_pending_item_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp,
		 void *item),
	TP_ARGS(mp, dfp, item),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, dfp->dfp_ops->name)
		__field(void *, intent)
		__field(void *, item)
		__field(char, committed)
		__field(unsigned int, flags)
		__field(int, nr)
	),
	TP_fast_assign(
		/* mp may be NULL for freshly recovered intents */
		__entry->dev = mp ? mp->m_super->s_dev : 0;
		__assign_str(name);
		__entry->intent = dfp->dfp_intent;
		__entry->item = item;
		__entry->committed = dfp->dfp_done != NULL;
		__entry->flags = dfp->dfp_flags;
		__entry->nr = dfp->dfp_count;
	),
	TP_printk("dev %d:%d optype %s intent %p item %p flags %s committed %d nr %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->intent,
		  __entry->item,
		  __print_flags(__entry->flags, "|", XFS_DEFER_PENDING_STRINGS),
		  __entry->committed,
		  __entry->nr)
)
#define DEFINE_DEFER_PENDING_ITEM_EVENT(name) \
DEFINE_EVENT(xfs_defer_pending_item_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp, \
		 void *item), \
	TP_ARGS(mp, dfp, item))
DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_add_item);
DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_cancel_item);
DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_finish_item);
  2642. /* rmap tracepoints */
/*
 * Event class for reverse-mapping updates; records the mapped extent
 * (AG block + length), the owner info, and folds the unwritten state
 * into the flags word.
 */
DECLARE_EVENT_CLASS(xfs_rmap_class,
	TP_PROTO(struct xfs_btree_cur *cur,
		 xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
		 const struct xfs_owner_info *oinfo),
	TP_ARGS(cur, agbno, len, unwritten, oinfo),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(unsigned long, flags)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->owner = oinfo->oi_owner;
		__entry->offset = oinfo->oi_offset;
		__entry->flags = oinfo->oi_flags;
		/* report unwritten extents via the rmap flags word */
		if (unwritten)
			__entry->flags |= XFS_RMAP_UNWRITTEN;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
);
#define DEFINE_RMAP_EVENT(name) \
DEFINE_EVENT(xfs_rmap_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, \
		 xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
		 const struct xfs_owner_info *oinfo), \
	TP_ARGS(cur, agbno, len, unwritten, oinfo))
  2683. /* btree cursor error/%ip tracepoint class */
/*
 * Event class for btree operation errors; the owner identity recorded
 * depends on the btree type (inode number for inode-rooted btrees, AG
 * number for AG btrees, neither for in-memory btrees).
 */
DECLARE_EVENT_CLASS(xfs_btree_error_class,
	TP_PROTO(struct xfs_btree_cur *cur, int error,
		 unsigned long caller_ip),
	TP_ARGS(cur, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_ino_t, ino)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		switch (cur->bc_ops->type) {
		case XFS_BTREE_TYPE_INODE:
			__entry->agno = 0;
			__entry->ino = cur->bc_ino.ip->i_ino;
			break;
		case XFS_BTREE_TYPE_AG:
			__entry->agno = cur->bc_ag.pag->pag_agno;
			__entry->ino = 0;
			break;
		case XFS_BTREE_TYPE_MEM:
			__entry->agno = 0;
			__entry->ino = 0;
			break;
		}
		__entry->error = error;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno 0x%x ino 0x%llx error %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->ino,
		  __entry->error,
		  (char *)__entry->caller_ip)
);
#define DEFINE_BTREE_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_btree_error_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, int error, \
		 unsigned long caller_ip), \
	TP_ARGS(cur, error, caller_ip))
/* rmap map/unmap/convert operations, each with a matching error event */
DEFINE_RMAP_EVENT(xfs_rmap_unmap);
DEFINE_RMAP_EVENT(xfs_rmap_unmap_done);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_unmap_error);
DEFINE_RMAP_EVENT(xfs_rmap_map);
DEFINE_RMAP_EVENT(xfs_rmap_map_done);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_map_error);
DEFINE_RMAP_EVENT(xfs_rmap_convert);
DEFINE_RMAP_EVENT(xfs_rmap_convert_done);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_convert_error);
/*
 * Records the internal state value reached during an rmap extent
 * conversion, along with the btree owner identity (by btree type) and
 * the caller's return address.
 */
TRACE_EVENT(xfs_rmap_convert_state,
	TP_PROTO(struct xfs_btree_cur *cur, int state,
		 unsigned long caller_ip),
	TP_ARGS(cur, state, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_ino_t, ino)
		__field(int, state)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		switch (cur->bc_ops->type) {
		case XFS_BTREE_TYPE_INODE:
			__entry->agno = 0;
			__entry->ino = cur->bc_ino.ip->i_ino;
			break;
		case XFS_BTREE_TYPE_AG:
			__entry->agno = cur->bc_ag.pag->pag_agno;
			__entry->ino = 0;
			break;
		case XFS_BTREE_TYPE_MEM:
			__entry->agno = 0;
			__entry->ino = 0;
			break;
		}
		__entry->state = state;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno 0x%x ino 0x%llx state %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->ino,
		  __entry->state,
		  (char *)__entry->caller_ip)
);
/*
 * Event class for low-level rmap btree record updates; takes the raw
 * owner/offset/flags values instead of an xfs_owner_info.
 */
DECLARE_EVENT_CLASS(xfs_rmapbt_class,
	TP_PROTO(struct xfs_btree_cur *cur,
		 xfs_agblock_t agbno, xfs_extlen_t len,
		 uint64_t owner, uint64_t offset, unsigned int flags),
	TP_ARGS(cur, agbno, len, owner, offset, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = agbno;
		__entry->len = len;
		__entry->owner = owner;
		__entry->offset = offset;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
);
#define DEFINE_RMAPBT_EVENT(name) \
DEFINE_EVENT(xfs_rmapbt_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, \
		 xfs_agblock_t agbno, xfs_extlen_t len, \
		 uint64_t owner, uint64_t offset, unsigned int flags), \
	TP_ARGS(cur, agbno, len, owner, offset, flags))
  2810. TRACE_DEFINE_ENUM(XFS_RMAP_MAP);
  2811. TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED);
  2812. TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP);
  2813. TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP_SHARED);
  2814. TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT);
  2815. TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT_SHARED);
  2816. TRACE_DEFINE_ENUM(XFS_RMAP_ALLOC);
  2817. TRACE_DEFINE_ENUM(XFS_RMAP_FREE);
/*
 * Event class for deferred rmap intent items: the intent's mapping is
 * decoded into AG coordinates, and the owner, fork, file range, extent
 * state, and operation type are logged.
 */
DECLARE_EVENT_CLASS(xfs_rmap_deferred_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri),
	TP_ARGS(mp, ri),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long long, owner)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(int, whichfork)
		__field(xfs_fileoff_t, l_loff)
		__field(xfs_filblks_t, l_len)
		__field(xfs_exntst_t, l_state)
		__field(int, op)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
		__entry->agbno = XFS_FSB_TO_AGBNO(mp,
				ri->ri_bmap.br_startblock);
		__entry->owner = ri->ri_owner;
		__entry->whichfork = ri->ri_whichfork;
		__entry->l_loff = ri->ri_bmap.br_startoff;
		__entry->l_len = ri->ri_bmap.br_blockcount;
		__entry->l_state = ri->ri_bmap.br_state;
		__entry->op = ri->ri_type;
	),
	TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->op, XFS_RMAP_INTENT_STRINGS),
		  __entry->agno,
		  __entry->agbno,
		  __entry->owner,
		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
		  __entry->l_loff,
		  __entry->l_len,
		  __entry->l_state)
);

/* Stamp out one tracepoint from the deferred-rmap class above. */
#define DEFINE_RMAP_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_rmap_deferred_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri), \
	TP_ARGS(mp, ri))
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);

/* rmap record update/insert/delete tracepoints and their error events */
DEFINE_RMAPBT_EVENT(xfs_rmap_update);
DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_insert_error);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_delete_error);
DEFINE_BTREE_ERROR_EVENT(xfs_rmap_update_error);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_candidate);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_query);
DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_candidate);
DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range);
DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
/* deferred bmbt updates */
TRACE_DEFINE_ENUM(XFS_BMAP_MAP);
TRACE_DEFINE_ENUM(XFS_BMAP_UNMAP);

/*
 * Event class for deferred bmap intent items.  Realtime-fork mappings are
 * reported as a raw block number (rtbno) against the realtime device;
 * all other mappings are decoded into AG coordinates against the data
 * device, with opdev set accordingly.
 */
DECLARE_EVENT_CLASS(xfs_bmap_deferred_class,
	TP_PROTO(struct xfs_bmap_intent *bi),
	TP_ARGS(bi),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, opdev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_ino_t, ino)
		__field(xfs_agblock_t, agbno)
		__field(xfs_fsblock_t, rtbno)
		__field(int, whichfork)
		__field(xfs_fileoff_t, l_loff)
		__field(xfs_filblks_t, l_len)
		__field(xfs_exntst_t, l_state)
		__field(int, op)
	),
	TP_fast_assign(
		struct xfs_inode	*ip = bi->bi_owner;

		__entry->dev = ip->i_mount->m_super->s_dev;
		if (xfs_ifork_is_realtime(ip, bi->bi_whichfork)) {
			/* rt mappings have no AG decomposition */
			__entry->agno = 0;
			__entry->agbno = 0;
			__entry->rtbno = bi->bi_bmap.br_startblock;
			__entry->opdev = ip->i_mount->m_rtdev_targp->bt_dev;
		} else {
			__entry->agno = XFS_FSB_TO_AGNO(ip->i_mount,
						bi->bi_bmap.br_startblock);
			__entry->agbno = XFS_FSB_TO_AGBNO(ip->i_mount,
						bi->bi_bmap.br_startblock);
			__entry->rtbno = 0;
			__entry->opdev = __entry->dev;
		}
		__entry->ino = ip->i_ino;
		__entry->whichfork = bi->bi_whichfork;
		__entry->l_loff = bi->bi_bmap.br_startoff;
		__entry->l_len = bi->bi_bmap.br_blockcount;
		__entry->l_state = bi->bi_bmap.br_state;
		__entry->op = bi->bi_type;
	),
	TP_printk("dev %d:%d op %s opdev %d:%d ino 0x%llx agno 0x%x agbno 0x%x rtbno 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->op, XFS_BMAP_INTENT_STRINGS),
		  MAJOR(__entry->opdev), MINOR(__entry->opdev),
		  __entry->ino,
		  __entry->agno,
		  __entry->agbno,
		  __entry->rtbno,
		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
		  __entry->l_loff,
		  __entry->l_len,
		  __entry->l_state)
);

/* Stamp out one tracepoint from the deferred-bmap class above. */
#define DEFINE_BMAP_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_bmap_deferred_class, name, \
	TP_PROTO(struct xfs_bmap_intent *bi), \
	TP_ARGS(bi))
DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_defer);
DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_deferred);
/* per-AG reservation */

/*
 * Event class for per-AG block reservation events: snapshots the AG's
 * free-block and freelist counters together with the reservation's
 * reserved/asked totals and the length being operated on.
 */
DECLARE_EVENT_CLASS(xfs_ag_resv_class,
	TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type resv,
		 xfs_extlen_t len),
	TP_ARGS(pag, resv, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, resv)
		__field(xfs_extlen_t, freeblks)
		__field(xfs_extlen_t, flcount)
		__field(xfs_extlen_t, reserved)
		__field(xfs_extlen_t, asked)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		struct xfs_ag_resv	*r = xfs_perag_resv(pag, resv);

		__entry->dev = pag->pag_mount->m_super->s_dev;
		__entry->agno = pag->pag_agno;
		__entry->resv = resv;
		__entry->freeblks = pag->pagf_freeblks;
		__entry->flcount = pag->pagf_flcount;
		/* r may be NULL for this reservation type; log zeroes then. */
		__entry->reserved = r ? r->ar_reserved : 0;
		__entry->asked = r ? r->ar_asked : 0;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno 0x%x resv %d freeblks %u flcount %u "
		  "resv %u ask %u len %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->resv,
		  __entry->freeblks,
		  __entry->flcount,
		  __entry->reserved,
		  __entry->asked,
		  __entry->len)
)

/* Stamp out one tracepoint from the per-AG reservation class above. */
#define DEFINE_AG_RESV_EVENT(name) \
DEFINE_EVENT(xfs_ag_resv_class, name, \
	TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type type, \
		 xfs_extlen_t len), \
	TP_ARGS(pag, type, len))

/* per-AG reservation tracepoints */
DEFINE_AG_RESV_EVENT(xfs_ag_resv_init);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_free);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_alloc_extent);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical);
DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
/* simple AG-based error/%ip tracepoint class */

/*
 * Event class for "an error happened in this AG" events: logs the AG
 * number, the error code, and the caller's return address.
 */
DECLARE_EVENT_CLASS(xfs_ag_error_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
		 unsigned long caller_ip),
	TP_ARGS(mp, agno, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->error = error;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno 0x%x error %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->error,
		  (char *)__entry->caller_ip)
);

/* Stamp out one tracepoint from the AG error class above. */
#define DEFINE_AG_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_ag_error_class, name, \
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, agno, error, caller_ip))
DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
/* refcount tracepoint classes */

/*
 * Event class for a plain refcount btree extent: AG number from the
 * cursor plus the caller-supplied agbno/length.
 */
DECLARE_EVENT_CLASS(xfs_refcount_class,
	TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
		 xfs_extlen_t len),
	TP_ARGS(cur, agbno, len),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = agbno;
		__entry->len = len;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
);

/* Stamp out one tracepoint from the refcount extent class above. */
#define DEFINE_REFCOUNT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, \
		 xfs_extlen_t len), \
	TP_ARGS(cur, agbno, len))

/* Export the btree lookup direction values to userspace trace tooling. */
TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi);
TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi);

/* Trace a refcount btree lookup: target agbno and compare direction. */
TRACE_EVENT(xfs_refcount_lookup,
	TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno,
		 xfs_lookup_t dir),
	TP_ARGS(cur, agbno, dir),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(xfs_lookup_t, dir)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = agbno;
		__entry->dir = dir;
	),
	TP_printk("dev %d:%d agno 0x%x agbno 0x%x cmp %s(%d)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agbno,
		  __print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR),
		  __entry->dir)
)
/* single-rcext tracepoint class */

/*
 * Event class for one refcount record: domain, start block, length, and
 * reference count, plus the cursor's AG.
 */
DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec),
	TP_ARGS(cur, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(enum xfs_refc_domain, domain)
		__field(xfs_agblock_t, startblock)
		__field(xfs_extlen_t, blockcount)
		__field(xfs_nlink_t, refcount)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->domain = irec->rc_domain;
		__entry->startblock = irec->rc_startblock;
		__entry->blockcount = irec->rc_blockcount;
		__entry->refcount = irec->rc_refcount;
	),
	TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->startblock,
		  __entry->blockcount,
		  __entry->refcount)
)

/* Stamp out one tracepoint from the single-record class above. */
#define DEFINE_REFCOUNT_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_extent_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec), \
	TP_ARGS(cur, irec))

/* single-rcext and an agbno tracepoint class */

/*
 * Same as xfs_refcount_extent_class, but also logs a separate agbno of
 * interest within (or near) the record.
 */
DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec,
		 xfs_agblock_t agbno),
	TP_ARGS(cur, irec, agbno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(enum xfs_refc_domain, domain)
		__field(xfs_agblock_t, startblock)
		__field(xfs_extlen_t, blockcount)
		__field(xfs_nlink_t, refcount)
		__field(xfs_agblock_t, agbno)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->domain = irec->rc_domain;
		__entry->startblock = irec->rc_startblock;
		__entry->blockcount = irec->rc_blockcount;
		__entry->refcount = irec->rc_refcount;
		__entry->agbno = agbno;
	),
	TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->startblock,
		  __entry->blockcount,
		  __entry->refcount,
		  __entry->agbno)
)

/* Stamp out one tracepoint from the record-plus-agbno class above. */
#define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_extent_at_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \
		 xfs_agblock_t agbno), \
	TP_ARGS(cur, irec, agbno))
/* double-rcext tracepoint class */

/*
 * Event class for a pair of refcount records (i1 and i2), e.g. the two
 * records involved in a merge decision.
 */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
		 struct xfs_refcount_irec *i2),
	TP_ARGS(cur, i1, i2),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(enum xfs_refc_domain, i1_domain)
		__field(xfs_agblock_t, i1_startblock)
		__field(xfs_extlen_t, i1_blockcount)
		__field(xfs_nlink_t, i1_refcount)
		__field(enum xfs_refc_domain, i2_domain)
		__field(xfs_agblock_t, i2_startblock)
		__field(xfs_extlen_t, i2_blockcount)
		__field(xfs_nlink_t, i2_refcount)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->i1_domain = i1->rc_domain;
		__entry->i1_startblock = i1->rc_startblock;
		__entry->i1_blockcount = i1->rc_blockcount;
		__entry->i1_refcount = i1->rc_refcount;
		__entry->i2_domain = i2->rc_domain;
		__entry->i2_startblock = i2->rc_startblock;
		__entry->i2_blockcount = i2->rc_blockcount;
		__entry->i2_refcount = i2->rc_refcount;
	),
	TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
		  "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i1_startblock,
		  __entry->i1_blockcount,
		  __entry->i1_refcount,
		  __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i2_startblock,
		  __entry->i2_blockcount,
		  __entry->i2_refcount)
)

/* Stamp out one tracepoint from the two-record class above. */
#define DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_double_extent_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
		 struct xfs_refcount_irec *i2), \
	TP_ARGS(cur, i1, i2))

/* double-rcext and an agbno tracepoint class */

/*
 * Same as the two-record class above, plus a separate agbno of interest.
 */
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
		 struct xfs_refcount_irec *i2, xfs_agblock_t agbno),
	TP_ARGS(cur, i1, i2, agbno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(enum xfs_refc_domain, i1_domain)
		__field(xfs_agblock_t, i1_startblock)
		__field(xfs_extlen_t, i1_blockcount)
		__field(xfs_nlink_t, i1_refcount)
		__field(enum xfs_refc_domain, i2_domain)
		__field(xfs_agblock_t, i2_startblock)
		__field(xfs_extlen_t, i2_blockcount)
		__field(xfs_nlink_t, i2_refcount)
		__field(xfs_agblock_t, agbno)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->i1_domain = i1->rc_domain;
		__entry->i1_startblock = i1->rc_startblock;
		__entry->i1_blockcount = i1->rc_blockcount;
		__entry->i1_refcount = i1->rc_refcount;
		__entry->i2_domain = i2->rc_domain;
		__entry->i2_startblock = i2->rc_startblock;
		__entry->i2_blockcount = i2->rc_blockcount;
		__entry->i2_refcount = i2->rc_refcount;
		__entry->agbno = agbno;
	),
	TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
		  "dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i1_startblock,
		  __entry->i1_blockcount,
		  __entry->i1_refcount,
		  __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i2_startblock,
		  __entry->i2_blockcount,
		  __entry->i2_refcount,
		  __entry->agbno)
)

/* Stamp out one tracepoint from the two-record-plus-agbno class above. */
#define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
		 struct xfs_refcount_irec *i2, xfs_agblock_t agbno), \
	TP_ARGS(cur, i1, i2, agbno))
/* triple-rcext tracepoint class */

/*
 * Event class for three refcount records at once (i1, i2, i3), e.g. a
 * center-merge candidate and its two neighbors.
 */
DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1,
		 struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3),
	TP_ARGS(cur, i1, i2, i3),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(enum xfs_refc_domain, i1_domain)
		__field(xfs_agblock_t, i1_startblock)
		__field(xfs_extlen_t, i1_blockcount)
		__field(xfs_nlink_t, i1_refcount)
		__field(enum xfs_refc_domain, i2_domain)
		__field(xfs_agblock_t, i2_startblock)
		__field(xfs_extlen_t, i2_blockcount)
		__field(xfs_nlink_t, i2_refcount)
		__field(enum xfs_refc_domain, i3_domain)
		__field(xfs_agblock_t, i3_startblock)
		__field(xfs_extlen_t, i3_blockcount)
		__field(xfs_nlink_t, i3_refcount)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->i1_domain = i1->rc_domain;
		__entry->i1_startblock = i1->rc_startblock;
		__entry->i1_blockcount = i1->rc_blockcount;
		__entry->i1_refcount = i1->rc_refcount;
		__entry->i2_domain = i2->rc_domain;
		__entry->i2_startblock = i2->rc_startblock;
		__entry->i2_blockcount = i2->rc_blockcount;
		__entry->i2_refcount = i2->rc_refcount;
		__entry->i3_domain = i3->rc_domain;
		__entry->i3_startblock = i3->rc_startblock;
		__entry->i3_blockcount = i3->rc_blockcount;
		__entry->i3_refcount = i3->rc_refcount;
	),
	TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
		  "dom %s agbno 0x%x fsbcount 0x%x refcount %u -- "
		  "dom %s agbno 0x%x fsbcount 0x%x refcount %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i1_startblock,
		  __entry->i1_blockcount,
		  __entry->i1_refcount,
		  __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i2_startblock,
		  __entry->i2_blockcount,
		  __entry->i2_refcount,
		  __print_symbolic(__entry->i3_domain, XFS_REFC_DOMAIN_STRINGS),
		  __entry->i3_startblock,
		  __entry->i3_blockcount,
		  __entry->i3_refcount)
);

/* Stamp out one tracepoint from the three-record class above. */
#define DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(name) \
DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
	TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \
		 struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3), \
	TP_ARGS(cur, i1, i2, i3))
/* refcount btree tracepoints */
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_insert);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_delete);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_insert_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_delete_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_update_error);

/* refcount adjustment tracepoints */
DEFINE_REFCOUNT_EVENT(xfs_refcount_increase);
DEFINE_REFCOUNT_EVENT(xfs_refcount_decrease);
DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_increase);
DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_decrease);
DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(xfs_refcount_merge_center_extents);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_modify_extent);
DEFINE_REFCOUNT_EXTENT_AT_EVENT(xfs_refcount_split_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_left_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_right_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_left_extent);
DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_right_extent);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_cow_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_center_extents_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_modify_extent_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_split_extent_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_left_extent_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_right_extent_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_left_extent_error);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_right_extent_error);

/* reflink helpers */
DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared);
DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared_result);
DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_shared_error);

/* Export the refcount intent type values to userspace trace tooling. */
TRACE_DEFINE_ENUM(XFS_REFCOUNT_INCREASE);
TRACE_DEFINE_ENUM(XFS_REFCOUNT_DECREASE);
TRACE_DEFINE_ENUM(XFS_REFCOUNT_ALLOC_COW);
TRACE_DEFINE_ENUM(XFS_REFCOUNT_FREE_COW);
/*
 * Event class for deferred refcount intent items: the intent's start
 * block is decoded into AG coordinates and logged with the length and
 * operation type.
 */
DECLARE_EVENT_CLASS(xfs_refcount_deferred_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc),
	TP_ARGS(mp, refc),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(int, op)
		__field(xfs_agblock_t, agbno)
		__field(xfs_extlen_t, len)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = XFS_FSB_TO_AGNO(mp, refc->ri_startblock);
		__entry->op = refc->ri_type;
		__entry->agbno = XFS_FSB_TO_AGBNO(mp, refc->ri_startblock);
		__entry->len = refc->ri_blockcount;
	),
	TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x fsbcount 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->op, XFS_REFCOUNT_INTENT_STRINGS),
		  __entry->agno,
		  __entry->agbno,
		  __entry->len)
);

/* Stamp out one tracepoint from the deferred-refcount class above. */
#define DEFINE_REFCOUNT_DEFERRED_EVENT(name) \
DEFINE_EVENT(xfs_refcount_deferred_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc), \
	TP_ARGS(mp, refc))
DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_defer);
DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred);
DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_finish_one_leftover);
/* simple inode-based error/%ip tracepoint class */

/*
 * Event class for "an error happened on this inode" events: inode number,
 * error code, and the caller's return address.
 */
DECLARE_EVENT_CLASS(xfs_inode_error_class,
	TP_PROTO(struct xfs_inode *ip, int error, unsigned long caller_ip),
	TP_ARGS(ip, error, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(int, error)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->error = error;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d ino 0x%llx error %d caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->error,
		  (char *)__entry->caller_ip)
);

/* Stamp out one tracepoint from the inode error class above. */
#define DEFINE_INODE_ERROR_EVENT(name) \
DEFINE_EVENT(xfs_inode_error_class, name, \
	TP_PROTO(struct xfs_inode *ip, int error, \
		 unsigned long caller_ip), \
	TP_ARGS(ip, error, caller_ip))
/* reflink tracepoint classes */

/* two-file io tracepoint class */

/*
 * Event class for IO that spans two files on the same filesystem: logs
 * both inodes' in-core and on-disk sizes plus the source/destination
 * offsets and the byte count.
 */
DECLARE_EVENT_CLASS(xfs_double_io_class,
	TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len,
		 struct xfs_inode *dest, xfs_off_t doffset),
	TP_ARGS(src, soffset, len, dest, doffset),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_ino)
		__field(loff_t, src_isize)
		__field(loff_t, src_disize)
		__field(loff_t, src_offset)
		__field(long long, len)
		__field(xfs_ino_t, dest_ino)
		__field(loff_t, dest_isize)
		__field(loff_t, dest_disize)
		__field(loff_t, dest_offset)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src)->i_sb->s_dev;
		__entry->src_ino = src->i_ino;
		__entry->src_isize = VFS_I(src)->i_size;
		__entry->src_disize = src->i_disk_size;
		__entry->src_offset = soffset;
		__entry->len = len;
		__entry->dest_ino = dest->i_ino;
		__entry->dest_isize = VFS_I(dest)->i_size;
		__entry->dest_disize = dest->i_disk_size;
		__entry->dest_offset = doffset;
	),
	TP_printk("dev %d:%d bytecount 0x%llx "
		  "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx -> "
		  "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->len,
		  __entry->src_ino,
		  __entry->src_isize,
		  __entry->src_disize,
		  __entry->src_offset,
		  __entry->dest_ino,
		  __entry->dest_isize,
		  __entry->dest_disize,
		  __entry->dest_offset)
)

/* Stamp out one tracepoint from the two-file IO class above. */
#define DEFINE_DOUBLE_IO_EVENT(name) \
DEFINE_EVENT(xfs_double_io_class, name, \
	TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len, \
		 struct xfs_inode *dest, xfs_off_t doffset), \
	TP_ARGS(src, soffset, len, dest, doffset))
/* inode/irec events */

/*
 * Event class for an inode plus one bmap record: file offset, block
 * count, start block, and extent state.
 */
DECLARE_EVENT_CLASS(xfs_inode_irec_class,
	TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec),
	TP_ARGS(ip, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(xfs_fileoff_t, lblk)
		__field(xfs_extlen_t, len)
		__field(xfs_fsblock_t, pblk)
		__field(int, state)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->lblk = irec->br_startoff;
		__entry->len = irec->br_blockcount;
		__entry->pblk = irec->br_startblock;
		__entry->state = irec->br_state;
	),
	TP_printk("dev %d:%d ino 0x%llx fileoff 0x%llx fsbcount 0x%x startblock 0x%llx st %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->lblk,
		  __entry->len,
		  __entry->pblk,
		  __entry->state)
);

/* Stamp out one tracepoint from the inode/irec class above. */
#define DEFINE_INODE_IREC_EVENT(name) \
DEFINE_EVENT(xfs_inode_irec_class, name, \
	TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec), \
	TP_ARGS(ip, irec))
/* inode iomap invalidation events */

/*
 * Event class fired when a cached writeback iomap is found to be stale:
 * logs the cached mapping, the writeback context's sequence number, and
 * the current sequence number of the fork the mapping came from.
 */
DECLARE_EVENT_CLASS(xfs_wb_invalid_class,
	TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap, unsigned int wpcseq, int whichfork),
	TP_ARGS(ip, iomap, wpcseq, whichfork),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u64, addr)
		__field(loff_t, pos)
		__field(u64, len)
		__field(u16, type)
		__field(u16, flags)
		__field(u32, wpcseq)
		__field(u32, forkseq)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->addr = iomap->addr;
		__entry->pos = iomap->offset;
		__entry->len = iomap->length;
		__entry->type = iomap->type;
		__entry->flags = iomap->flags;
		__entry->wpcseq = wpcseq;
		/* if_seq is read without the ilock here */
		__entry->forkseq = READ_ONCE(xfs_ifork_ptr(ip, whichfork)->if_seq);
	),
	TP_printk("dev %d:%d ino 0x%llx pos 0x%llx addr 0x%llx bytecount 0x%llx type 0x%x flags 0x%x wpcseq 0x%x forkseq 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->pos,
		  __entry->addr,
		  __entry->len,
		  __entry->type,
		  __entry->flags,
		  __entry->wpcseq,
		  __entry->forkseq)
);

/* Stamp out one tracepoint from the writeback-invalidation class above. */
#define DEFINE_WB_INVALID_EVENT(name) \
DEFINE_EVENT(xfs_wb_invalid_class, name, \
	TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap, unsigned int wpcseq, int whichfork), \
	TP_ARGS(ip, iomap, wpcseq, whichfork))
DEFINE_WB_INVALID_EVENT(xfs_wb_cow_iomap_invalid);
DEFINE_WB_INVALID_EVENT(xfs_wb_data_iomap_invalid);
/*
 * Event class fired when a cached iomap fails validation: logs the
 * mapping's validity cookie alongside the inode's current sequence so
 * the mismatch is visible in the trace.
 */
DECLARE_EVENT_CLASS(xfs_iomap_invalid_class,
	TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap),
	TP_ARGS(ip, iomap),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(u64, addr)
		__field(loff_t, pos)
		__field(u64, len)
		__field(u64, validity_cookie)
		__field(u64, inodeseq)
		__field(u16, type)
		__field(u16, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
		__entry->addr = iomap->addr;
		__entry->pos = iomap->offset;
		__entry->len = iomap->length;
		__entry->validity_cookie = iomap->validity_cookie;
		__entry->type = iomap->type;
		__entry->flags = iomap->flags;
		__entry->inodeseq = xfs_iomap_inode_sequence(ip, iomap->flags);
	),
	TP_printk("dev %d:%d ino 0x%llx pos 0x%llx addr 0x%llx bytecount 0x%llx type 0x%x flags 0x%x validity_cookie 0x%llx inodeseq 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->pos,
		  __entry->addr,
		  __entry->len,
		  __entry->type,
		  __entry->flags,
		  __entry->validity_cookie,
		  __entry->inodeseq)
);

/* Stamp out one tracepoint from the iomap-invalidation class above. */
#define DEFINE_IOMAP_INVALID_EVENT(name) \
DEFINE_EVENT(xfs_iomap_invalid_class, name, \
	TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap), \
	TP_ARGS(ip, iomap))
DEFINE_IOMAP_INVALID_EVENT(xfs_iomap_invalid);
  3553. /* refcount/reflink tracepoint definitions */
  3554. /* reflink tracepoints */
  3555. DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag);
  3556. DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag);
  3557. DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size);
/*
 * Trace a block remap request: len blocks of src starting at file block
 * soffset are mapped into dest starting at file block doffset.
 */
TRACE_EVENT(xfs_reflink_remap_blocks,
	TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset,
		 xfs_filblks_t len, struct xfs_inode *dest,
		 xfs_fileoff_t doffset),
	TP_ARGS(src, soffset, len, dest, doffset),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, src_ino)
		__field(xfs_fileoff_t, src_lblk)
		__field(xfs_filblks_t, len)
		__field(xfs_ino_t, dest_ino)
		__field(xfs_fileoff_t, dest_lblk)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(src)->i_sb->s_dev;
		__entry->src_ino = src->i_ino;
		__entry->src_lblk = soffset;
		__entry->len = len;
		__entry->dest_ino = dest->i_ino;
		__entry->dest_lblk = doffset;
	),
	TP_printk("dev %d:%d fsbcount 0x%llx "
		  "ino 0x%llx fileoff 0x%llx -> ino 0x%llx fileoff 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->len,
		  __entry->src_ino,
		  __entry->src_lblk,
		  __entry->dest_ino,
		  __entry->dest_lblk)
);
/* Remap-range and per-step reflink error tracepoints. */
DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error);
DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src);
DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest);

/* dedupe tracepoints */
DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error);

/* ioctl tracepoints */
/*
 * Trace a clone ioctl: source and destination inode numbers plus their
 * current i_size values at the time of the call.
 */
TRACE_EVENT(xfs_ioctl_clone,
	TP_PROTO(struct inode *src, struct inode *dest),
	TP_ARGS(src, dest),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, src_ino)
		__field(loff_t, src_isize)
		__field(unsigned long, dest_ino)
		__field(loff_t, dest_isize)
	),
	TP_fast_assign(
		__entry->dev = src->i_sb->s_dev;
		__entry->src_ino = src->i_ino;
		__entry->src_isize = i_size_read(src);
		__entry->dest_ino = dest->i_ino;
		__entry->dest_isize = i_size_read(dest);
	),
	TP_printk("dev %d:%d ino 0x%lx isize 0x%llx -> ino 0x%lx isize 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->src_ino,
		  __entry->src_isize,
		  __entry->dest_ino,
		  __entry->dest_isize)
);
/* unshare tracepoints */
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);

/* copy on write */
DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);

DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_from);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_to);

DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);

DEFINE_INODE_IREC_EVENT(xfs_reflink_cancel_cow);

/* rmap swapext tracepoints */
DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap);
DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece);
DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error);
/* fsmap traces */

/*
 * Trace an fsmap key or mapping expressed as a reverse-mapping record
 * within a specific AG.  keydev is the raw device id from the fsmap key;
 * it is decoded with new_decode_dev() before being recorded.
 */
DECLARE_EVENT_CLASS(xfs_fsmap_class,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno,
		 const struct xfs_rmap_irec *rmap),
	TP_ARGS(mp, keydev, agno, rmap),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, keydev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_fsblock_t, bno)
		__field(xfs_filblks_t, len)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->keydev = new_decode_dev(keydev);
		__entry->agno = agno;
		__entry->bno = rmap->rm_startblock;
		__entry->len = rmap->rm_blockcount;
		__entry->owner = rmap->rm_owner;
		__entry->offset = rmap->rm_offset;
		__entry->flags = rmap->rm_flags;
	),
	TP_printk("dev %d:%d keydev %d:%d agno 0x%x startblock 0x%llx fsbcount 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->agno,
		  __entry->bno,
		  __entry->len,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
)
#define DEFINE_FSMAP_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_class, name, \
	TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, \
		 const struct xfs_rmap_irec *rmap), \
	TP_ARGS(mp, keydev, agno, rmap))
DEFINE_FSMAP_EVENT(xfs_fsmap_low_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_high_key);
DEFINE_FSMAP_EVENT(xfs_fsmap_mapping);
/*
 * Trace an fsmap key for a linearly-addressed (non-AG) device: just the
 * decoded key device and a block number.
 */
DECLARE_EVENT_CLASS(xfs_fsmap_linear_class,
	TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno),
	TP_ARGS(mp, keydev, bno),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, keydev)
		__field(xfs_fsblock_t, bno)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->keydev = new_decode_dev(keydev);
		__entry->bno = bno;
	),
	TP_printk("dev %d:%d keydev %d:%d bno 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->bno)
)
#define DEFINE_FSMAP_LINEAR_EVENT(name) \
DEFINE_EVENT(xfs_fsmap_linear_class, name, \
	TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno), \
	TP_ARGS(mp, keydev, bno))
DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_low_key_linear);
DEFINE_FSMAP_LINEAR_EVENT(xfs_fsmap_high_key_linear);
/*
 * Trace a GETFSMAP record in the userspace-facing format: physical
 * position and length are in 512-byte daddr/BB units (hence the "daddr"
 * and "bbcount" labels in the output).
 */
DECLARE_EVENT_CLASS(xfs_getfsmap_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap),
	TP_ARGS(mp, fsmap),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(dev_t, keydev)
		__field(xfs_daddr_t, block)
		__field(xfs_daddr_t, len)
		__field(uint64_t, owner)
		__field(uint64_t, offset)
		__field(uint64_t, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->keydev = new_decode_dev(fsmap->fmr_device);
		__entry->block = fsmap->fmr_physical;
		__entry->len = fsmap->fmr_length;
		__entry->owner = fsmap->fmr_owner;
		__entry->offset = fsmap->fmr_offset;
		__entry->flags = fsmap->fmr_flags;
	),
	TP_printk("dev %d:%d keydev %d:%d daddr 0x%llx bbcount 0x%llx owner 0x%llx fileoff_daddr 0x%llx flags 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  MAJOR(__entry->keydev), MINOR(__entry->keydev),
		  __entry->block,
		  __entry->len,
		  __entry->owner,
		  __entry->offset,
		  __entry->flags)
)
#define DEFINE_GETFSMAP_EVENT(name) \
DEFINE_EVENT(xfs_getfsmap_class, name, \
	TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap), \
	TP_ARGS(mp, fsmap))
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping);
/*
 * Trace one computed transaction reservation: its type index and the
 * log space, log count and flags stored in the xfs_trans_res.
 */
DECLARE_EVENT_CLASS(xfs_trans_resv_class,
	TP_PROTO(struct xfs_mount *mp, unsigned int type,
		 struct xfs_trans_res *res),
	TP_ARGS(mp, type, res),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, type)
		__field(uint, logres)
		__field(int, logcount)
		__field(int, logflags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->type = type;
		__entry->logres = res->tr_logres;
		__entry->logcount = res->tr_logcount;
		__entry->logflags = res->tr_logflags;
	),
	TP_printk("dev %d:%d type %d logres %u logcount %d flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->type,
		  __entry->logres,
		  __entry->logcount,
		  __entry->logflags)
)
#define DEFINE_TRANS_RESV_EVENT(name) \
DEFINE_EVENT(xfs_trans_resv_class, name, \
	TP_PROTO(struct xfs_mount *mp, unsigned int type, \
		 struct xfs_trans_res *res), \
	TP_ARGS(mp, type, res))
DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc);
DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc_minlogsize);
/* Trace the largest transaction reservation found for this mount. */
TRACE_EVENT(xfs_log_get_max_trans_res,
	TP_PROTO(struct xfs_mount *mp, const struct xfs_trans_res *res),
	TP_ARGS(mp, res),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(uint, logres)
		__field(int, logcount)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->logres = res->tr_logres;
		__entry->logcount = res->tr_logcount;
	),
	TP_printk("dev %d:%d logres %u logcount %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->logres,
		  __entry->logcount)
);
/*
 * Trace transaction lifecycle events.  The log ticket id is recorded when
 * a ticket has been allocated; otherwise the tid field reads as zero.
 */
DECLARE_EVENT_CLASS(xfs_trans_class,
	TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip),
	TP_ARGS(tp, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(uint32_t, tid)
		__field(uint32_t, flags)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = tp->t_mountp->m_super->s_dev;
		/* no ticket yet (e.g. before reservation) -> tid 0 */
		__entry->tid = 0;
		if (tp->t_ticket)
			__entry->tid = tp->t_ticket->t_tid;
		__entry->flags = tp->t_flags;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d trans %x flags 0x%x caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->tid,
		  __entry->flags,
		  (char *)__entry->caller_ip)
)
#define DEFINE_TRANS_EVENT(name) \
DEFINE_EVENT(xfs_trans_class, name, \
	TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \
	TP_ARGS(tp, caller_ip))
DEFINE_TRANS_EVENT(xfs_trans_alloc);
DEFINE_TRANS_EVENT(xfs_trans_cancel);
DEFINE_TRANS_EVENT(xfs_trans_commit);
DEFINE_TRANS_EVENT(xfs_trans_dup);
DEFINE_TRANS_EVENT(xfs_trans_free);
DEFINE_TRANS_EVENT(xfs_trans_roll);
DEFINE_TRANS_EVENT(xfs_trans_add_item);
DEFINE_TRANS_EVENT(xfs_trans_commit_items);
DEFINE_TRANS_EVENT(xfs_trans_free_items);
/*
 * Trace an update of one AGI unlinked-list bucket head: the bucket index
 * and the old and new agino head pointers.
 */
TRACE_EVENT(xfs_iunlink_update_bucket,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
		 xfs_agino_t old_ptr, xfs_agino_t new_ptr),
	TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(unsigned int, bucket)
		__field(xfs_agino_t, old_ptr)
		__field(xfs_agino_t, new_ptr)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->bucket = bucket;
		__entry->old_ptr = old_ptr;
		__entry->new_ptr = new_ptr;
	),
	TP_printk("dev %d:%d agno 0x%x bucket %u old 0x%x new 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->bucket,
		  __entry->old_ptr,
		  __entry->new_ptr)
);
/*
 * Trace an update of an on-disk inode's unlinked-list next pointer: the
 * inode (as agno/agino) and the old and new next-agino values.
 */
TRACE_EVENT(xfs_iunlink_update_dinode,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino,
		 xfs_agino_t old_ptr, xfs_agino_t new_ptr),
	TP_ARGS(mp, agno, agino, old_ptr, new_ptr),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(xfs_agino_t, old_ptr)
		__field(xfs_agino_t, new_ptr)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->agino = agino;
		__entry->old_ptr = old_ptr;
		__entry->new_ptr = new_ptr;
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x old 0x%x new 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  __entry->old_ptr,
		  __entry->new_ptr)
);
/*
 * Trace a reload of an inode's incore unlinked-list pointers, dumping the
 * cached prev/next agino values alongside the inode's own agno/agino.
 */
TRACE_EVENT(xfs_iunlink_reload_next,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(xfs_agino_t, prev_agino)
		__field(xfs_agino_t, next_agino)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
		__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
		__entry->prev_agino = ip->i_prev_unlinked;
		__entry->next_agino = ip->i_next_unlinked;
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x prev_unlinked 0x%x next_unlinked 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  __entry->prev_agino,
		  __entry->next_agino)
);
/*
 * Trace the reload of an entire AGI unlinked bucket for an inode; the
 * bucket index is derived from agino modulo XFS_AGI_UNLINKED_BUCKETS.
 */
TRACE_EVENT(xfs_inode_reload_unlinked_bucket,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
		__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x bucket %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->agino,
		  /* bucket is computed, not stored, to keep the record small */
		  __entry->agino % XFS_AGI_UNLINKED_BUCKETS)
);
/* Trace an inode identified by its AG number and AG-relative inode number. */
DECLARE_EVENT_CLASS(xfs_ag_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
		__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
	),
	TP_printk("dev %d:%d agno 0x%x agino 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno, __entry->agino)
)
#define DEFINE_AGINODE_EVENT(name) \
DEFINE_EVENT(xfs_ag_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))
DEFINE_AGINODE_EVENT(xfs_iunlink);
DEFINE_AGINODE_EVENT(xfs_iunlink_remove);
/*
 * Filesystem-wide health tracking: record a sick/corrupt/healthy state
 * change with the health flag mask being set or cleared.
 */
DECLARE_EVENT_CLASS(xfs_fs_corrupt_class,
	TP_PROTO(struct xfs_mount *mp, unsigned int flags),
	TP_ARGS(mp, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->flags)
);
#define DEFINE_FS_CORRUPT_EVENT(name)	\
DEFINE_EVENT(xfs_fs_corrupt_class, name,	\
	TP_PROTO(struct xfs_mount *mp, unsigned int flags), \
	TP_ARGS(mp, flags))
DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_sick);
DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_corrupt);
DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_healthy);
DEFINE_FS_CORRUPT_EVENT(xfs_fs_unfixed_corruption);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_sick);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_corrupt);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_mark_healthy);
DEFINE_FS_CORRUPT_EVENT(xfs_rt_unfixed_corruption);
/* Per-AG health tracking: same as xfs_fs_corrupt_class plus the AG number. */
DECLARE_EVENT_CLASS(xfs_ag_corrupt_class,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int flags),
	TP_ARGS(mp, agno, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d agno 0x%x flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno, __entry->flags)
);
#define DEFINE_AG_CORRUPT_EVENT(name)	\
DEFINE_EVENT(xfs_ag_corrupt_class, name,	\
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
		 unsigned int flags), \
	TP_ARGS(mp, agno, flags))
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_sick);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_corrupt);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_mark_healthy);
DEFINE_AG_CORRUPT_EVENT(xfs_ag_unfixed_corruption);
/* Per-inode health tracking: inode number plus the health flag mask. */
DECLARE_EVENT_CLASS(xfs_inode_corrupt_class,
	TP_PROTO(struct xfs_inode *ip, unsigned int flags),
	TP_ARGS(ip, flags),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->ino = ip->i_ino;
		__entry->flags = flags;
	),
	TP_printk("dev %d:%d ino 0x%llx flags 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->flags)
);
#define DEFINE_INODE_CORRUPT_EVENT(name)	\
DEFINE_EVENT(xfs_inode_corrupt_class, name,	\
	TP_PROTO(struct xfs_inode *ip, unsigned int flags), \
	TP_ARGS(ip, flags))
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_sick);
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_corrupt);
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_healthy);
DEFINE_INODE_CORRUPT_EVENT(xfs_inode_unfixed_corruption);
/* Trace the start of an inode walk within one AG from a given inode. */
TRACE_EVENT(xfs_iwalk_ag,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 xfs_agino_t startino),
	TP_ARGS(mp, agno, startino),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, startino)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->startino = startino;
	),
	TP_printk("dev %d:%d agno 0x%x startino 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno,
		  __entry->startino)
)
/*
 * Trace one inobt record visited during an inode walk: the starting agino
 * of the chunk and its free-inode bitmask.
 */
TRACE_EVENT(xfs_iwalk_ag_rec,
	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
		 struct xfs_inobt_rec_incore *irec),
	TP_ARGS(mp, agno, irec),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, startino)
		__field(uint64_t, freemask)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->agno = agno;
		__entry->startino = irec->ir_startino;
		__entry->freemask = irec->ir_free;
	),
	TP_printk("dev %d:%d agno 0x%x startino 0x%x freemask 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno,
		  __entry->startino, __entry->freemask)
)
/* Trace parallel-work setup: worker thread count and the initiating pid. */
TRACE_EVENT(xfs_pwork_init,
	TP_PROTO(struct xfs_mount *mp, unsigned int nr_threads, pid_t pid),
	TP_ARGS(mp, nr_threads, pid),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, nr_threads)
		__field(pid_t, pid)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->nr_threads = nr_threads;
		__entry->pid = pid;
	),
	TP_printk("dev %d:%d nr_threads %u pid %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->nr_threads, __entry->pid)
)
/*
 * Trace a stripe-alignment check: the proposed new dalign value, the
 * superblock's recorded root inode, and the root inode recalculated under
 * the new alignment.
 */
TRACE_EVENT(xfs_check_new_dalign,
	TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
	TP_ARGS(mp, new_dalign, calc_rootino),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, new_dalign)
		__field(xfs_ino_t, sb_rootino)
		__field(xfs_ino_t, calc_rootino)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->new_dalign = new_dalign;
		__entry->sb_rootino = mp->m_sb.sb_rootino;
		__entry->calc_rootino = calc_rootino;
	),
	TP_printk("dev %d:%d new_dalign %d sb_rootino 0x%llx calc_rootino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->new_dalign, __entry->sb_rootino,
		  __entry->calc_rootino)
)
/*
 * Trace committal of a staged (fake) AG btree root: root block, level
 * count and block count taken from the cursor's afake structure.
 */
TRACE_EVENT(xfs_btree_commit_afakeroot,
	TP_PROTO(struct xfs_btree_cur *cur),
	TP_ARGS(cur),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(unsigned int, levels)
		__field(unsigned int, blocks)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__assign_str(name);
		__entry->agno = cur->bc_ag.pag->pag_agno;
		__entry->agbno = cur->bc_ag.afake->af_root;
		__entry->levels = cur->bc_ag.afake->af_levels;
		__entry->blocks = cur->bc_ag.afake->af_blocks;
	),
	TP_printk("dev %d:%d %sbt agno 0x%x levels %u blocks %u root %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->agno,
		  __entry->levels,
		  __entry->blocks,
		  __entry->agbno)
)
/*
 * Trace committal of a staged (fake) inode-rooted btree: owner inode as
 * agno/agino, the fork being rebuilt, and the ifake geometry.
 */
TRACE_EVENT(xfs_btree_commit_ifakeroot,
	TP_PROTO(struct xfs_btree_cur *cur),
	TP_ARGS(cur),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agino_t, agino)
		__field(unsigned int, levels)
		__field(unsigned int, blocks)
		__field(int, whichfork)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__assign_str(name);
		__entry->agno = XFS_INO_TO_AGNO(cur->bc_mp,
					cur->bc_ino.ip->i_ino);
		__entry->agino = XFS_INO_TO_AGINO(cur->bc_mp,
					cur->bc_ino.ip->i_ino);
		__entry->levels = cur->bc_ino.ifake->if_levels;
		__entry->blocks = cur->bc_ino.ifake->if_blocks;
		__entry->whichfork = cur->bc_ino.whichfork;
	),
	TP_printk("dev %d:%d %sbt agno 0x%x agino 0x%x whichfork %s levels %u blocks %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->agno,
		  __entry->agino,
		  __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS),
		  __entry->levels,
		  __entry->blocks)
)
/*
 * Trace the computed geometry for one level of a btree bulk load: how many
 * records go in this level, records per block (actual and desired), and the
 * resulting block counts with and without the extra split block.
 */
TRACE_EVENT(xfs_btree_bload_level_geometry,
	TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
		 uint64_t nr_this_level, unsigned int nr_per_block,
		 unsigned int desired_npb, uint64_t blocks,
		 uint64_t blocks_with_extra),
	TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks,
		blocks_with_extra),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(unsigned int, level)
		__field(unsigned int, nlevels)
		__field(uint64_t, nr_this_level)
		__field(unsigned int, nr_per_block)
		__field(unsigned int, desired_npb)
		__field(unsigned long long, blocks)
		__field(unsigned long long, blocks_with_extra)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__assign_str(name);
		__entry->level = level;
		__entry->nlevels = cur->bc_nlevels;
		__entry->nr_this_level = nr_this_level;
		__entry->nr_per_block = nr_per_block;
		__entry->desired_npb = desired_npb;
		__entry->blocks = blocks;
		__entry->blocks_with_extra = blocks_with_extra;
	),
	TP_printk("dev %d:%d %sbt level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->level,
		  __entry->nlevels,
		  __entry->nr_this_level,
		  __entry->nr_per_block,
		  __entry->desired_npb,
		  __entry->blocks,
		  __entry->blocks_with_extra)
)
/*
 * Trace one block written during a btree bulk load.  The block pointer is
 * decoded per the btree's pointer width: long (64-bit fsblock) pointers are
 * split into agno/agbno, short (32-bit) pointers are AG-relative already.
 */
TRACE_EVENT(xfs_btree_bload_block,
	TP_PROTO(struct xfs_btree_cur *cur, unsigned int level,
		 uint64_t block_idx, uint64_t nr_blocks,
		 union xfs_btree_ptr *ptr, unsigned int nr_records),
	TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__string(name, cur->bc_ops->name)
		__field(unsigned int, level)
		__field(unsigned long long, block_idx)
		__field(unsigned long long, nr_blocks)
		__field(xfs_agnumber_t, agno)
		__field(xfs_agblock_t, agbno)
		__field(unsigned int, nr_records)
	),
	TP_fast_assign(
		__entry->dev = cur->bc_mp->m_super->s_dev;
		__assign_str(name);
		__entry->level = level;
		__entry->block_idx = block_idx;
		__entry->nr_blocks = nr_blocks;
		if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
			xfs_fsblock_t	fsb = be64_to_cpu(ptr->l);

			__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
			__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb);
		} else {
			__entry->agno = cur->bc_ag.pag->pag_agno;
			__entry->agbno = be32_to_cpu(ptr->s);
		}
		__entry->nr_records = nr_records;
	),
	TP_printk("dev %d:%d %sbt level %u block %llu/%llu agno 0x%x agbno 0x%x recs %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __get_str(name),
		  __entry->level,
		  __entry->block_idx,
		  __entry->nr_blocks,
		  __entry->agno,
		  __entry->agbno,
		  __entry->nr_records)
)
/* Trace a supported [min, max] timestamp range for this mount. */
DECLARE_EVENT_CLASS(xfs_timestamp_range_class,
	TP_PROTO(struct xfs_mount *mp, time64_t min, time64_t max),
	TP_ARGS(mp, min, max),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(long long, min)
		__field(long long, max)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->min = min;
		__entry->max = max;
	),
	TP_printk("dev %d:%d min %lld max %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->min,
		  __entry->max)
)
#define DEFINE_TIMESTAMP_RANGE_EVENT(name) \
DEFINE_EVENT(xfs_timestamp_range_class, name, \
	TP_PROTO(struct xfs_mount *mp, long long min, long long max), \
	TP_ARGS(mp, min, max))
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range);
DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range);
/*
 * Trace an inode cache walk request.  The icw filter pointer may be NULL
 * (walk everything), in which case every recorded filter field reads 0.
 */
DECLARE_EVENT_CLASS(xfs_icwalk_class,
	TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw,
		 unsigned long caller_ip),
	TP_ARGS(mp, icw, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u32, flags)
		__field(uint32_t, uid)
		__field(uint32_t, gid)
		__field(prid_t, prid)
		__field(__u64, min_file_size)
		__field(long, scan_limit)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->flags = icw ? icw->icw_flags : 0;
		/* map kernel uid/gid back into the mount's user namespace */
		__entry->uid = icw ? from_kuid(mp->m_super->s_user_ns,
						icw->icw_uid) : 0;
		__entry->gid = icw ? from_kgid(mp->m_super->s_user_ns,
						icw->icw_gid) : 0;
		__entry->prid = icw ? icw->icw_prid : 0;
		__entry->min_file_size = icw ? icw->icw_min_file_size : 0;
		__entry->scan_limit = icw ? icw->icw_scan_limit : 0;
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %ld caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->flags,
		  __entry->uid,
		  __entry->gid,
		  __entry->prid,
		  __entry->min_file_size,
		  __entry->scan_limit,
		  (char *)__entry->caller_ip)
);
#define DEFINE_ICWALK_EVENT(name)	\
DEFINE_EVENT(xfs_icwalk_class, name,	\
	TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, \
		 unsigned long caller_ip), \
	TP_ARGS(mp, icw, caller_ip))
DEFINE_ICWALK_EVENT(xfs_ioc_free_eofblocks);
DEFINE_ICWALK_EVENT(xfs_blockgc_free_space);
/*
 * Export the iclog state values so __print_symbolic() in xlog_iclog_class
 * can render them as names in the trace output.
 */
TRACE_DEFINE_ENUM(XLOG_STATE_ACTIVE);
TRACE_DEFINE_ENUM(XLOG_STATE_WANT_SYNC);
TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING);
TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC);
TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK);
TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY);
/*
 * Trace in-core log buffer (iclog) state transitions: state, refcount,
 * current write offset, flags, the header LSN, and the caller.
 */
DECLARE_EVENT_CLASS(xlog_iclog_class,
	TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip),
	TP_ARGS(iclog, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(uint32_t, state)
		__field(int32_t, refcount)
		__field(uint32_t, offset)
		__field(uint32_t, flags)
		__field(unsigned long long, lsn)
		__field(unsigned long, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = iclog->ic_log->l_mp->m_super->s_dev;
		__entry->state = iclog->ic_state;
		__entry->refcount = atomic_read(&iclog->ic_refcnt);
		__entry->offset = iclog->ic_offset;
		__entry->flags = iclog->ic_flags;
		__entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_symbolic(__entry->state, XLOG_STATE_STRINGS),
		  __entry->refcount,
		  __entry->offset,
		  __entry->lsn,
		  __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS),
		  (char *)__entry->caller_ip)

);
#define DEFINE_ICLOG_EVENT(name)	\
DEFINE_EVENT(xlog_iclog_class, name,	\
	TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), \
	TP_ARGS(iclog, caller_ip))
DEFINE_ICLOG_EVENT(xlog_iclog_activate);
DEFINE_ICLOG_EVENT(xlog_iclog_clean);
DEFINE_ICLOG_EVENT(xlog_iclog_callback);
DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_start);
DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_done);
DEFINE_ICLOG_EVENT(xlog_iclog_force);
DEFINE_ICLOG_EVENT(xlog_iclog_force_lsn);
DEFINE_ICLOG_EVENT(xlog_iclog_get_space);
DEFINE_ICLOG_EVENT(xlog_iclog_release);
DEFINE_ICLOG_EVENT(xlog_iclog_switch);
DEFINE_ICLOG_EVENT(xlog_iclog_sync);
DEFINE_ICLOG_EVENT(xlog_iclog_syncing);
DEFINE_ICLOG_EVENT(xlog_iclog_sync_done);
DEFINE_ICLOG_EVENT(xlog_iclog_want_sync);
DEFINE_ICLOG_EVENT(xlog_iclog_wait_on);
DEFINE_ICLOG_EVENT(xlog_iclog_write);
/*
 * Export the deferred attr state-machine values so __print_symbolic() in
 * xfs_das_state_class can render them as names in the trace output.
 */
TRACE_DEFINE_ENUM(XFS_DAS_UNINIT);
TRACE_DEFINE_ENUM(XFS_DAS_SF_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_SF_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_ADD);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_SET_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ALLOC_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REPLACE);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_OLD);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_ATTR);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_SET_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_ALLOC_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REPLACE);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_OLD);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_RMT);
TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_ATTR);
TRACE_DEFINE_ENUM(XFS_DAS_DONE);
/*
 * Record a transition of the deferred-attr state machine: the new state
 * (decoded via XFS_DAS_STRINGS) and the inode the attr operation is
 * working on.
 */
DECLARE_EVENT_CLASS(xfs_das_state_class,
	TP_PROTO(int das, struct xfs_inode *ip),
	TP_ARGS(das, ip),
	TP_STRUCT__entry(
		__field(int, das)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->das = das;
		__entry->ino = ip->i_ino;
	),
	TP_printk("state change %s ino 0x%llx",
		  __print_symbolic(__entry->das, XFS_DAS_STRINGS),
		  __entry->ino)
)
/* Instantiate the attr state-machine tracepoints on xfs_das_state_class. */
#define DEFINE_DAS_STATE_EVENT(name) \
DEFINE_EVENT(xfs_das_state_class, name, \
	TP_PROTO(int das, struct xfs_inode *ip), \
	TP_ARGS(das, ip))
DEFINE_DAS_STATE_EVENT(xfs_attr_sf_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_set_iter_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_leaf_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_node_addname_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_remove_iter_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_alloc);
DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_remove_return);
DEFINE_DAS_STATE_EVENT(xfs_attr_defer_add);
/*
 * Fired when the filesystem is forcibly shut down.  Captures the error
 * tag (ptag), the shutdown flags, and the source file/line that
 * triggered the shutdown, so postmortem analysis can find the call site.
 */
TRACE_EVENT(xfs_force_shutdown,
	TP_PROTO(struct xfs_mount *mp, int ptag, int flags, const char *fname,
		 int line_num),
	TP_ARGS(mp, ptag, flags, fname, line_num),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, ptag)
		__field(int, flags)
		__string(fname, fname)
		__field(int, line_num)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->ptag = ptag;
		__entry->flags = flags;
		__assign_str(fname);
		__entry->line_num = line_num;
	),
	TP_printk("dev %d:%d tag %s flags %s file %s line_num %d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_flags(__entry->ptag, "|", XFS_PTAG_STRINGS),
		  __print_flags(__entry->flags, "|", XFS_SHUTDOWN_STRINGS),
		  __get_str(fname),
		  __entry->line_num)
);
#ifdef CONFIG_XFS_DRAIN_INTENTS
/*
 * Track the per-AG deferred-intent drain counter: samples the current
 * dr_count at hold/release/wait time along with the caller's address.
 */
DECLARE_EVENT_CLASS(xfs_perag_intents_class,
	TP_PROTO(struct xfs_perag *pag, void *caller_ip),
	TP_ARGS(pag, caller_ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_agnumber_t, agno)
		__field(long, nr_intents)
		__field(void *, caller_ip)
	),
	TP_fast_assign(
		__entry->dev = pag->pag_mount->m_super->s_dev;
		__entry->agno = pag->pag_agno;
		__entry->nr_intents = atomic_read(&pag->pag_intents_drain.dr_count);
		__entry->caller_ip = caller_ip;
	),
	TP_printk("dev %d:%d agno 0x%x intents %ld caller %pS",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->agno,
		  __entry->nr_intents,
		  __entry->caller_ip)
);
#define DEFINE_PERAG_INTENTS_EVENT(name) \
DEFINE_EVENT(xfs_perag_intents_class, name, \
	TP_PROTO(struct xfs_perag *pag, void *caller_ip), \
	TP_ARGS(pag, caller_ip))
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_hold);
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_intent_rele);
DEFINE_PERAG_INTENTS_EVENT(xfs_perag_wait_intents);
#endif /* CONFIG_XFS_DRAIN_INTENTS */
  4467. #ifdef CONFIG_XFS_MEMORY_BUFS
  4468. TRACE_EVENT(xmbuf_create,
  4469. TP_PROTO(struct xfs_buftarg *btp),
  4470. TP_ARGS(btp),
  4471. TP_STRUCT__entry(
  4472. __field(dev_t, dev)
  4473. __field(unsigned long, ino)
  4474. __array(char, pathname, MAXNAMELEN)
  4475. ),
  4476. TP_fast_assign(
  4477. char *path;
  4478. struct file *file = btp->bt_file;
  4479. __entry->dev = btp->bt_mount->m_super->s_dev;
  4480. __entry->ino = file_inode(file)->i_ino;
  4481. path = file_path(file, __entry->pathname, MAXNAMELEN);
  4482. if (IS_ERR(path))
  4483. strncpy(__entry->pathname, "(unknown)",
  4484. sizeof(__entry->pathname));
  4485. ),
  4486. TP_printk("dev %d:%d xmino 0x%lx path '%s'",
  4487. MAJOR(__entry->dev), MINOR(__entry->dev),
  4488. __entry->ino,
  4489. __entry->pathname)
  4490. );
/*
 * Fired when an in-memory buffer cache target is torn down.  Records
 * the backing inode's current size and its memory footprint, computed
 * from i_blocks (512-byte sectors) plus the sub-block residue i_bytes.
 */
TRACE_EVENT(xmbuf_free,
	TP_PROTO(struct xfs_buftarg *btp),
	TP_ARGS(btp),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ino)
		__field(unsigned long long, bytes)
		__field(loff_t, size)
	),
	TP_fast_assign(
		struct file *file = btp->bt_file;
		struct inode *inode = file_inode(file);

		__entry->dev = btp->bt_mount->m_super->s_dev;
		__entry->size = i_size_read(inode);
		__entry->bytes = (inode->i_blocks << SECTOR_SHIFT) + inode->i_bytes;
		__entry->ino = inode->i_ino;
	),
	TP_printk("dev %d:%d xmino 0x%lx mem_bytes 0x%llx isize 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->bytes,
		  __entry->size)
);
  4514. #endif /* CONFIG_XFS_MEMORY_BUFS */
  4515. #ifdef CONFIG_XFS_BTREE_IN_MEM
/*
 * Fired when an in-memory btree is initialized.  Records the btree ops
 * vtable, the backing xfile's inode number, the min/max records per
 * leaf ([0]) and node ([1]) block, and the btree owner.
 */
TRACE_EVENT(xfbtree_init,
	TP_PROTO(struct xfs_mount *mp, struct xfbtree *xfbt,
		 const struct xfs_btree_ops *ops),
	TP_ARGS(mp, xfbt, ops),
	TP_STRUCT__entry(
		__field(const void *, btree_ops)
		__field(unsigned long, xfino)
		__field(unsigned int, leaf_mxr)
		__field(unsigned int, leaf_mnr)
		__field(unsigned int, node_mxr)
		__field(unsigned int, node_mnr)
		__field(unsigned long long, owner)
	),
	TP_fast_assign(
		__entry->btree_ops = ops;
		__entry->xfino = file_inode(xfbt->target->bt_file)->i_ino;
		/* index 0 = leaf level, index 1 = node levels */
		__entry->leaf_mxr = xfbt->maxrecs[0];
		__entry->node_mxr = xfbt->maxrecs[1];
		__entry->leaf_mnr = xfbt->minrecs[0];
		__entry->node_mnr = xfbt->minrecs[1];
		__entry->owner = xfbt->owner;
	),
	TP_printk("xfino 0x%lx btree_ops %pS owner 0x%llx leaf_mxr %u leaf_mnr %u node_mxr %u node_mnr %u",
		  __entry->xfino,
		  __entry->btree_ops,
		  __entry->owner,
		  __entry->leaf_mxr,
		  __entry->leaf_mnr,
		  __entry->node_mxr,
		  __entry->node_mnr)
);
/*
 * Snapshot of a buffer belonging to an in-memory btree: disk address,
 * length, hold/pin counts, lock state, and buffer flags at the time of
 * the event.
 */
DECLARE_EVENT_CLASS(xfbtree_buf_class,
	TP_PROTO(struct xfbtree *xfbt, struct xfs_buf *bp),
	TP_ARGS(xfbt, bp),
	TP_STRUCT__entry(
		__field(unsigned long, xfino)
		__field(xfs_daddr_t, bno)
		__field(int, nblks)
		__field(int, hold)
		__field(int, pincount)
		__field(unsigned int, lockval)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->xfino = file_inode(xfbt->target->bt_file)->i_ino;
		__entry->bno = xfs_buf_daddr(bp);
		__entry->nblks = bp->b_length;
		__entry->hold = atomic_read(&bp->b_hold);
		__entry->pincount = atomic_read(&bp->b_pin_count);
		/* semaphore count stands in for the buffer lock state */
		__entry->lockval = bp->b_sema.count;
		__entry->flags = bp->b_flags;
	),
	TP_printk("xfino 0x%lx daddr 0x%llx bbcount 0x%x hold %d pincount %d lock %d flags %s",
		  __entry->xfino,
		  (unsigned long long)__entry->bno,
		  __entry->nblks,
		  __entry->hold,
		  __entry->pincount,
		  __entry->lockval,
		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
)
#define DEFINE_XFBTREE_BUF_EVENT(name) \
DEFINE_EVENT(xfbtree_buf_class, name, \
	TP_PROTO(struct xfbtree *xfbt, struct xfs_buf *bp), \
	TP_ARGS(xfbt, bp))
DEFINE_XFBTREE_BUF_EVENT(xfbtree_create_root_buf);
DEFINE_XFBTREE_BUF_EVENT(xfbtree_trans_commit_buf);
DEFINE_XFBTREE_BUF_EVENT(xfbtree_trans_cancel_buf);
/*
 * Track block allocation/free within an in-memory btree's backing
 * xfile: which btree (by ops name), cursor depth, and the file offset
 * of the block being allocated or freed.
 */
DECLARE_EVENT_CLASS(xfbtree_freesp_class,
	TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur,
		 xfs_fileoff_t fileoff),
	TP_ARGS(xfbt, cur, fileoff),
	TP_STRUCT__entry(
		__field(unsigned long, xfino)
		__string(btname, cur->bc_ops->name)
		__field(int, nlevels)
		__field(xfs_fileoff_t, fileoff)
	),
	TP_fast_assign(
		__entry->xfino = file_inode(xfbt->target->bt_file)->i_ino;
		__assign_str(btname);
		__entry->nlevels = cur->bc_nlevels;
		__entry->fileoff = fileoff;
	),
	TP_printk("xfino 0x%lx %sbt nlevels %d fileoff 0x%llx",
		  __entry->xfino,
		  __get_str(btname),
		  __entry->nlevels,
		  (unsigned long long)__entry->fileoff)
)
#define DEFINE_XFBTREE_FREESP_EVENT(name) \
DEFINE_EVENT(xfbtree_freesp_class, name, \
	TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur, \
		 xfs_fileoff_t fileoff), \
	TP_ARGS(xfbt, cur, fileoff))
DEFINE_XFBTREE_FREESP_EVENT(xfbtree_alloc_block);
DEFINE_XFBTREE_FREESP_EVENT(xfbtree_free_block);
  4613. #endif /* CONFIG_XFS_BTREE_IN_MEM */
  4614. /* exchmaps tracepoints */
/* Flag decoding table for exchmaps request/intent flags. */
#define XFS_EXCHMAPS_STRINGS \
	{ XFS_EXCHMAPS_ATTR_FORK, "ATTRFORK" }, \
	{ XFS_EXCHMAPS_SET_SIZES, "SETSIZES" }, \
	{ XFS_EXCHMAPS_INO1_WRITTEN, "INO1_WRITTEN" }, \
	{ XFS_EXCHMAPS_CLEAR_INO1_REFLINK, "CLEAR_INO1_REFLINK" }, \
	{ XFS_EXCHMAPS_CLEAR_INO2_REFLINK, "CLEAR_INO2_REFLINK" }, \
	{ __XFS_EXCHMAPS_INO2_SHORTFORM, "INO2_SF" }
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1_skip);
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1);
DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping2);
DEFINE_ITRUNC_EVENT(xfs_exchmaps_update_inode_size);
/* Decode which file of an exchange-range pair an event refers to. */
#define XFS_EXCHRANGE_INODES \
	{ 1, "file1" }, \
	{ 2, "file2" }
/*
 * Per-inode snapshot taken before/after an exchange-range operation:
 * data fork format, extent count, and attr fork offset.
 *
 * NOTE(review): broot_size is declared in the entry but never assigned
 * nor printed — looks like dead weight; confirm before removing, since
 * dropping it changes the event's userspace-visible format.
 */
DECLARE_EVENT_CLASS(xfs_exchrange_inode_class,
	TP_PROTO(struct xfs_inode *ip, int whichfile),
	TP_ARGS(ip, whichfile),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(int, whichfile)
		__field(xfs_ino_t, ino)
		__field(int, format)
		__field(xfs_extnum_t, nex)
		__field(int, broot_size)
		__field(int, fork_off)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->whichfile = whichfile;
		__entry->ino = ip->i_ino;
		__entry->format = ip->i_df.if_format;
		__entry->nex = ip->i_df.if_nextents;
		__entry->fork_off = xfs_inode_fork_boff(ip);
	),
	TP_printk("dev %d:%d ino 0x%llx whichfile %s format %s num_extents %llu forkoff 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __print_symbolic(__entry->whichfile, XFS_EXCHRANGE_INODES),
		  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
		  __entry->nex,
		  __entry->fork_off)
)
#define DEFINE_EXCHRANGE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_exchrange_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip, int whichfile), \
	TP_ARGS(ip, whichfile))
DEFINE_EXCHRANGE_INODE_EVENT(xfs_exchrange_before);
DEFINE_EXCHRANGE_INODE_EVENT(xfs_exchrange_after);
DEFINE_INODE_ERROR_EVENT(xfs_exchrange_error);
/* Flag decoding table for the XFS_IOC_EXCHANGE_RANGE control flags. */
#define XFS_EXCHANGE_RANGE_FLAGS_STRS \
	{ XFS_EXCHANGE_RANGE_TO_EOF, "TO_EOF" }, \
	{ XFS_EXCHANGE_RANGE_DSYNC, "DSYNC" }, \
	{ XFS_EXCHANGE_RANGE_DRY_RUN, "DRY_RUN" }, \
	{ XFS_EXCHANGE_RANGE_FILE1_WRITTEN, "F1_WRITTEN" }, \
	{ __XFS_EXCHANGE_RANGE_UPD_CMTIME1, "CMTIME1" }, \
	{ __XFS_EXCHANGE_RANGE_UPD_CMTIME2, "CMTIME2" }, \
	{ __XFS_EXCHANGE_RANGE_CHECK_FRESH2, "FRESH2" }
/* file exchange-range tracepoint class */
/*
 * Capture both inodes of an exchange-range request: in-core and on-disk
 * sizes of each file plus the request's offsets, byte count, and flags.
 */
DECLARE_EVENT_CLASS(xfs_exchrange_class,
	TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip1,
		 struct xfs_inode *ip2),
	TP_ARGS(fxr, ip1, ip2),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ip1_ino)
		__field(loff_t, ip1_isize)
		__field(loff_t, ip1_disize)
		__field(xfs_ino_t, ip2_ino)
		__field(loff_t, ip2_isize)
		__field(loff_t, ip2_disize)
		__field(loff_t, file1_offset)
		__field(loff_t, file2_offset)
		__field(unsigned long long, length)
		__field(unsigned long long, flags)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip1)->i_sb->s_dev;
		__entry->ip1_ino = ip1->i_ino;
		__entry->ip1_isize = VFS_I(ip1)->i_size;
		__entry->ip1_disize = ip1->i_disk_size;
		__entry->ip2_ino = ip2->i_ino;
		__entry->ip2_isize = VFS_I(ip2)->i_size;
		__entry->ip2_disize = ip2->i_disk_size;
		__entry->file1_offset = fxr->file1_offset;
		__entry->file2_offset = fxr->file2_offset;
		__entry->length = fxr->length;
		__entry->flags = fxr->flags;
	),
	TP_printk("dev %d:%d flags %s bytecount 0x%llx "
		  "ino1 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx -> "
		  "ino2 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_flags_u64(__entry->flags, "|", XFS_EXCHANGE_RANGE_FLAGS_STRS),
		  __entry->length,
		  __entry->ip1_ino,
		  __entry->ip1_isize,
		  __entry->ip1_disize,
		  __entry->file1_offset,
		  __entry->ip2_ino,
		  __entry->ip2_isize,
		  __entry->ip2_disize,
		  __entry->file2_offset)
)
#define DEFINE_EXCHRANGE_EVENT(name) \
DEFINE_EVENT(xfs_exchrange_class, name, \
	TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip1, \
		 struct xfs_inode *ip2), \
	TP_ARGS(fxr, ip1, ip2))
DEFINE_EXCHRANGE_EVENT(xfs_exchrange_prep);
DEFINE_EXCHRANGE_EVENT(xfs_exchrange_flush);
DEFINE_EXCHRANGE_EVENT(xfs_exchrange_mappings);
/*
 * Compare the timestamps supplied by userspace in the exchange-range
 * request (file2_*) against the inode's current m/ctime so that
 * "commit only if file2 is unchanged" freshness failures can be
 * diagnosed from the trace.
 */
TRACE_EVENT(xfs_exchrange_freshness,
	TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip2),
	TP_ARGS(fxr, ip2),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ip2_ino)
		__field(long long, ip2_mtime)
		__field(long long, ip2_ctime)
		__field(int, ip2_mtime_nsec)
		__field(int, ip2_ctime_nsec)
		__field(xfs_ino_t, file2_ino)
		__field(long long, file2_mtime)
		__field(long long, file2_ctime)
		__field(int, file2_mtime_nsec)
		__field(int, file2_ctime_nsec)
	),
	TP_fast_assign(
		struct timespec64 ts64;
		struct inode *inode2 = VFS_I(ip2);

		__entry->dev = inode2->i_sb->s_dev;
		__entry->ip2_ino = ip2->i_ino;
		ts64 = inode_get_ctime(inode2);
		__entry->ip2_ctime = ts64.tv_sec;
		__entry->ip2_ctime_nsec = ts64.tv_nsec;
		ts64 = inode_get_mtime(inode2);
		__entry->ip2_mtime = ts64.tv_sec;
		__entry->ip2_mtime_nsec = ts64.tv_nsec;
		__entry->file2_ino = fxr->file2_ino;
		__entry->file2_mtime = fxr->file2_mtime.tv_sec;
		__entry->file2_ctime = fxr->file2_ctime.tv_sec;
		__entry->file2_mtime_nsec = fxr->file2_mtime.tv_nsec;
		__entry->file2_ctime_nsec = fxr->file2_ctime.tv_nsec;
	),
	TP_printk("dev %d:%d "
		  "ino 0x%llx mtime %lld:%d ctime %lld:%d -> "
		  "file 0x%llx mtime %lld:%d ctime %lld:%d",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ip2_ino,
		  __entry->ip2_mtime,
		  __entry->ip2_mtime_nsec,
		  __entry->ip2_ctime,
		  __entry->ip2_ctime_nsec,
		  __entry->file2_ino,
		  __entry->file2_mtime,
		  __entry->file2_mtime_nsec,
		  __entry->file2_ctime,
		  __entry->file2_ctime_nsec)
);
/*
 * Record the worst-case btree block overhead (bmbt and rmapbt blocks)
 * estimated for a mapping exchange operation.
 */
TRACE_EVENT(xfs_exchmaps_overhead,
	TP_PROTO(struct xfs_mount *mp, unsigned long long bmbt_blocks,
		 unsigned long long rmapbt_blocks),
	TP_ARGS(mp, bmbt_blocks, rmapbt_blocks),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long long, bmbt_blocks)
		__field(unsigned long long, rmapbt_blocks)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->bmbt_blocks = bmbt_blocks;
		__entry->rmapbt_blocks = rmapbt_blocks;
	),
	TP_printk("dev %d:%d bmbt_blocks 0x%llx rmapbt_blocks 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->bmbt_blocks,
		  __entry->rmapbt_blocks)
);
/*
 * Dump an exchmaps request's resource estimates: the file ranges being
 * exchanged plus block counts (regular and realtime) per inode, block
 * reservation, and the predicted number of exchange steps.
 */
DECLARE_EVENT_CLASS(xfs_exchmaps_estimate_class,
	TP_PROTO(const struct xfs_exchmaps_req *req),
	TP_ARGS(req),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(xfs_fileoff_t, startoff1)
		__field(xfs_fileoff_t, startoff2)
		__field(xfs_filblks_t, blockcount)
		__field(uint64_t, flags)
		__field(xfs_filblks_t, ip1_bcount)
		__field(xfs_filblks_t, ip2_bcount)
		__field(xfs_filblks_t, ip1_rtbcount)
		__field(xfs_filblks_t, ip2_rtbcount)
		__field(unsigned long long, resblks)
		__field(unsigned long long, nr_exchanges)
	),
	TP_fast_assign(
		__entry->dev = req->ip1->i_mount->m_super->s_dev;
		__entry->ino1 = req->ip1->i_ino;
		__entry->ino2 = req->ip2->i_ino;
		__entry->startoff1 = req->startoff1;
		__entry->startoff2 = req->startoff2;
		__entry->blockcount = req->blockcount;
		__entry->flags = req->flags;
		__entry->ip1_bcount = req->ip1_bcount;
		__entry->ip2_bcount = req->ip2_bcount;
		__entry->ip1_rtbcount = req->ip1_rtbcount;
		__entry->ip2_rtbcount = req->ip2_rtbcount;
		__entry->resblks = req->resblks;
		__entry->nr_exchanges = req->nr_exchanges;
	),
	TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) bcount1 0x%llx rtbcount1 0x%llx bcount2 0x%llx rtbcount2 0x%llx resblks 0x%llx nr_exchanges %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->startoff1,
		  __entry->ino2, __entry->startoff2,
		  __entry->blockcount,
		  __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS),
		  __entry->ip1_bcount,
		  __entry->ip1_rtbcount,
		  __entry->ip2_bcount,
		  __entry->ip2_rtbcount,
		  __entry->resblks,
		  __entry->nr_exchanges)
);
#define DEFINE_EXCHMAPS_ESTIMATE_EVENT(name) \
DEFINE_EVENT(xfs_exchmaps_estimate_class, name, \
	TP_PROTO(const struct xfs_exchmaps_req *req), \
	TP_ARGS(req))
DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_initial_estimate);
DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_final_estimate);
/*
 * Dump an exchmaps log intent item: the two inodes, the exchange range,
 * flags, and each inode's current vs. post-exchange on-disk size.
 */
DECLARE_EVENT_CLASS(xfs_exchmaps_intent_class,
	TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi),
	TP_ARGS(mp, xmi),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(uint64_t, flags)
		__field(xfs_fileoff_t, startoff1)
		__field(xfs_fileoff_t, startoff2)
		__field(xfs_filblks_t, blockcount)
		__field(xfs_fsize_t, isize1)
		__field(xfs_fsize_t, isize2)
		__field(xfs_fsize_t, new_isize1)
		__field(xfs_fsize_t, new_isize2)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->ino1 = xmi->xmi_ip1->i_ino;
		__entry->ino2 = xmi->xmi_ip2->i_ino;
		__entry->flags = xmi->xmi_flags;
		__entry->startoff1 = xmi->xmi_startoff1;
		__entry->startoff2 = xmi->xmi_startoff2;
		__entry->blockcount = xmi->xmi_blockcount;
		__entry->isize1 = xmi->xmi_ip1->i_disk_size;
		__entry->isize2 = xmi->xmi_ip2->i_disk_size;
		__entry->new_isize1 = xmi->xmi_isize1;
		__entry->new_isize2 = xmi->xmi_isize2;
	),
	TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) isize1 0x%llx newisize1 0x%llx isize2 0x%llx newisize2 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->startoff1,
		  __entry->ino2, __entry->startoff2,
		  __entry->blockcount,
		  __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS),
		  __entry->isize1, __entry->new_isize1,
		  __entry->isize2, __entry->new_isize2)
);
#define DEFINE_EXCHMAPS_INTENT_EVENT(name) \
DEFINE_EVENT(xfs_exchmaps_intent_class, name, \
	TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi), \
	TP_ARGS(mp, xmi))
DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_defer);
DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_recover);
/*
 * One step of the extent-count delta estimation: the left/right
 * neighbors, the current mapping being replaced, the new mapping, the
 * resulting delta, and the merge-state bits used for the decision.
 * Each mapping is printed as startoff:startblock:blockcount.
 */
TRACE_EVENT(xfs_exchmaps_delta_nextents_step,
	TP_PROTO(struct xfs_mount *mp,
		 const struct xfs_bmbt_irec *left,
		 const struct xfs_bmbt_irec *curr,
		 const struct xfs_bmbt_irec *new,
		 const struct xfs_bmbt_irec *right,
		 int delta, unsigned int state),
	TP_ARGS(mp, left, curr, new, right, delta, state),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_fileoff_t, loff)
		__field(xfs_fsblock_t, lstart)
		__field(xfs_filblks_t, lcount)
		__field(xfs_fileoff_t, coff)
		__field(xfs_fsblock_t, cstart)
		__field(xfs_filblks_t, ccount)
		__field(xfs_fileoff_t, noff)
		__field(xfs_fsblock_t, nstart)
		__field(xfs_filblks_t, ncount)
		__field(xfs_fileoff_t, roff)
		__field(xfs_fsblock_t, rstart)
		__field(xfs_filblks_t, rcount)
		__field(int, delta)
		__field(unsigned int, state)
	),
	TP_fast_assign(
		__entry->dev = mp->m_super->s_dev;
		__entry->loff = left->br_startoff;
		__entry->lstart = left->br_startblock;
		__entry->lcount = left->br_blockcount;
		__entry->coff = curr->br_startoff;
		__entry->cstart = curr->br_startblock;
		__entry->ccount = curr->br_blockcount;
		__entry->noff = new->br_startoff;
		__entry->nstart = new->br_startblock;
		__entry->ncount = new->br_blockcount;
		__entry->roff = right->br_startoff;
		__entry->rstart = right->br_startblock;
		__entry->rcount = right->br_blockcount;
		__entry->delta = delta;
		__entry->state = state;
	),
	TP_printk("dev %d:%d left 0x%llx:0x%llx:0x%llx; curr 0x%llx:0x%llx:0x%llx <- new 0x%llx:0x%llx:0x%llx; right 0x%llx:0x%llx:0x%llx delta %d state 0x%x",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->loff, __entry->lstart, __entry->lcount,
		  __entry->coff, __entry->cstart, __entry->ccount,
		  __entry->noff, __entry->nstart, __entry->ncount,
		  __entry->roff, __entry->rstart, __entry->rcount,
		  __entry->delta, __entry->state)
);
/*
 * Final extent-count deltas for an exchmaps request: current extent
 * counts of the fork being exchanged in each inode, plus the signed
 * change each one is predicted to experience.
 */
TRACE_EVENT(xfs_exchmaps_delta_nextents,
	TP_PROTO(const struct xfs_exchmaps_req *req, int64_t d_nexts1,
		 int64_t d_nexts2),
	TP_ARGS(req, d_nexts1, d_nexts2),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino1)
		__field(xfs_ino_t, ino2)
		__field(xfs_extnum_t, nexts1)
		__field(xfs_extnum_t, nexts2)
		__field(int64_t, d_nexts1)
		__field(int64_t, d_nexts2)
	),
	TP_fast_assign(
		/* both inodes use the same fork for a given request */
		int whichfork = xfs_exchmaps_reqfork(req);

		__entry->dev = req->ip1->i_mount->m_super->s_dev;
		__entry->ino1 = req->ip1->i_ino;
		__entry->ino2 = req->ip2->i_ino;
		__entry->nexts1 = xfs_ifork_ptr(req->ip1, whichfork)->if_nextents;
		__entry->nexts2 = xfs_ifork_ptr(req->ip2, whichfork)->if_nextents;
		__entry->d_nexts1 = d_nexts1;
		__entry->d_nexts2 = d_nexts2;
	),
	TP_printk("dev %d:%d ino1 0x%llx nexts %llu ino2 0x%llx nexts %llu delta1 %lld delta2 %lld",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino1, __entry->nexts1,
		  __entry->ino2, __entry->nexts2,
		  __entry->d_nexts1, __entry->d_nexts2)
);
/*
 * Dump one parent-pointer record emitted (or expanded) during a
 * GETPARENTS request: record length, output-buffer cursor position,
 * parent inode/generation from the record's file handle, and the dirent
 * name.
 */
DECLARE_EVENT_CLASS(xfs_getparents_rec_class,
	TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi,
		 const struct xfs_attr_list_context *context,
		 const struct xfs_getparents_rec *pptr),
	TP_ARGS(ip, ppi, context, pptr),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned int, firstu)
		__field(unsigned short, reclen)
		__field(unsigned int, bufsize)
		__field(xfs_ino_t, parent_ino)
		__field(unsigned int, parent_gen)
		__string(name, pptr->gpr_name)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->ino = ip->i_ino;
		__entry->firstu = context->firstu;
		__entry->reclen = pptr->gpr_reclen;
		__entry->bufsize = ppi->gp_bufsize;
		__entry->parent_ino = pptr->gpr_parent.ha_fid.fid_ino;
		__entry->parent_gen = pptr->gpr_parent.ha_fid.fid_gen;
		__assign_str(name);
	),
	TP_printk("dev %d:%d ino 0x%llx firstu %u reclen %u bufsize %u parent_ino 0x%llx parent_gen 0x%x name '%s'",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->firstu,
		  __entry->reclen,
		  __entry->bufsize,
		  __entry->parent_ino,
		  __entry->parent_gen,
		  __get_str(name))
)
#define DEFINE_XFS_GETPARENTS_REC_EVENT(name) \
DEFINE_EVENT(xfs_getparents_rec_class, name, \
	TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, \
		 const struct xfs_attr_list_context *context, \
		 const struct xfs_getparents_rec *pptr), \
	TP_ARGS(ip, ppi, context, pptr))
DEFINE_XFS_GETPARENTS_REC_EVENT(xfs_getparents_put_listent);
DEFINE_XFS_GETPARENTS_REC_EVENT(xfs_getparents_expand_lastrec);
/*
 * Dump the overall state of a GETPARENTS request at begin/end: the
 * ioctl's in/out flags and buffer size, plus the xattr list cursor
 * (hashval/blkno/offset/initted) used to resume iteration.
 */
DECLARE_EVENT_CLASS(xfs_getparents_class,
	TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi,
		 const struct xfs_attrlist_cursor_kern *cur),
	TP_ARGS(ip, ppi, cur),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
		__field(unsigned short, iflags)
		__field(unsigned short, oflags)
		__field(unsigned int, bufsize)
		__field(unsigned int, hashval)
		__field(unsigned int, blkno)
		__field(unsigned int, offset)
		__field(int, initted)
	),
	TP_fast_assign(
		__entry->dev = ip->i_mount->m_super->s_dev;
		__entry->ino = ip->i_ino;
		__entry->iflags = ppi->gp_iflags;
		__entry->oflags = ppi->gp_oflags;
		__entry->bufsize = ppi->gp_bufsize;
		__entry->hashval = cur->hashval;
		__entry->blkno = cur->blkno;
		__entry->offset = cur->offset;
		__entry->initted = cur->initted;
	),
	TP_printk("dev %d:%d ino 0x%llx iflags 0x%x oflags 0x%x bufsize %u cur_init? %d hashval 0x%x blkno %u offset %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino,
		  __entry->iflags,
		  __entry->oflags,
		  __entry->bufsize,
		  __entry->initted,
		  __entry->hashval,
		  __entry->blkno,
		  __entry->offset)
)
#define DEFINE_XFS_GETPARENTS_EVENT(name) \
DEFINE_EVENT(xfs_getparents_class, name, \
	TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, \
		 const struct xfs_attrlist_cursor_kern *cur), \
	TP_ARGS(ip, ppi, cur))
DEFINE_XFS_GETPARENTS_EVENT(xfs_getparents_begin);
DEFINE_XFS_GETPARENTS_EVENT(xfs_getparents_end);
  5055. #endif /* _TRACE_XFS_H */
  5056. #undef TRACE_INCLUDE_PATH
  5057. #define TRACE_INCLUDE_PATH .
  5058. #define TRACE_INCLUDE_FILE xfs_trace
  5059. #include <trace/define_trace.h>