xhci.c

// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
        struct xhci_segment *seg = ring->first_seg;

        if (!td || !td->start_seg)
                return false;

        do {
                if (seg == td->start_seg)
                        return true;
                seg = seg->next;
        } while (seg && seg != ring->first_seg);

        return false;
}
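
/*
 * Note: ring segments are linked in a circle, so the walk above terminates
 * either when it wraps back around to first_seg or, defensively, if a NULL
 * next pointer is ever encountered.
 */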
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" microseconds
 * have passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
        u32 result;
        int ret;

        ret = readl_poll_timeout_atomic(ptr, result,
                                        (result & mask) == done ||
                                        result == U32_MAX,
                                        1, timeout_us);
        if (result == U32_MAX)          /* card removed */
                return -ENODEV;

        return ret;
}
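
/*
 * Example (taken from xhci_halt() below): wait up to XHCI_MAX_HALT_USEC for
 * the controller to report halted, polling every microsecond:
 *
 *      xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *                     XHCI_MAX_HALT_USEC);
 *
 * A return of -ENODEV means the register read back as all-ones (hardware
 * gone); -ETIMEDOUT means the bits never reached the expected value in time.
 */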
/*
 * xhci_handshake_check_state - same as xhci_handshake but takes an additional
 * exit_state parameter, and bails out with an error immediately when xhc_state
 * has exit_state flag set.
 */
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
                               u32 mask, u32 done, int usec, unsigned int exit_state)
{
        u32 result;
        int ret;

        ret = readl_poll_timeout_atomic(ptr, result,
                                        (result & mask) == done ||
                                        result == U32_MAX ||
                                        xhci->xhc_state & exit_state,
                                        1, usec);

        if (result == U32_MAX || xhci->xhc_state & exit_state)
                return -ENODEV;

        return ret;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

        mask = ~(XHCI_IRQS);
        halted = readl(&xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = readl(&xhci->op_regs->command);
        cmd &= mask;
        writel(cmd, &xhci->op_regs->command);
}
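
/*
 * Note on the mask logic above: the interrupt enable bits (XHCI_IRQS) are
 * always cleared; CMD_RUN is additionally cleared only when the controller
 * is not yet halted, which is what actually kicks off the halt.
 */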
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
        xhci_quiesce(xhci);

        ret = xhci_handshake(&xhci->op_regs->status,
                             STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (ret) {
                xhci_warn(xhci, "Host halt failed, %d\n", ret);
                return ret;
        }

        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
                       temp);
        writel(temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                             STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
                         XHCI_MAX_HALT_USEC);

        if (!ret) {
                /* clear state flags. Including dying, halted or removing */
                xhci->xhc_state = 0;
                xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
        }

        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
        u32 command;
        u32 state;
        int ret;

        state = readl(&xhci->op_regs->status);

        if (state == ~(u32)0) {
                xhci_warn(xhci, "Host not accessible, reset failed.\n");
                return -ENODEV;
        }

        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
        command = readl(&xhci->op_regs->command);
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);

        /*
         * Existing Intel xHCI controllers require a delay of 1 ms after
         * setting the CMD_RESET bit, and before accessing any HC registers.
         * This allows the HC to complete the reset operation and be ready
         * for HC register access. Without this delay, the subsequent HC
         * register access may very rarely result in a system hang.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

        ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
                                         CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
        if (ret)
                return ret;

        if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
                usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "Wait for controller to be ready for doorbell rings");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

        xhci->usb2_rhub.bus_state.port_c_suspend = 0;
        xhci->usb2_rhub.bus_state.suspended_ports = 0;
        xhci->usb2_rhub.bus_state.resuming_ports = 0;
        xhci->usb3_rhub.bus_state.port_c_suspend = 0;
        xhci->usb3_rhub.bus_state.suspended_ports = 0;
        xhci->usb3_rhub.bus_state.resuming_ports = 0;

        return ret;
}
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        struct iommu_domain *domain;
        int err, i;
        u64 val;
        u32 intrs;

        /*
         * Some Renesas controllers get into a weird state if they are
         * reset while programmed with 64bit addresses (they will preserve
         * the top half of the address in internal, non-visible
         * registers). You end up with half the address coming from the
         * kernel, and the other half coming from the firmware. Also,
         * changing the programming leads to extra accesses even if the
         * controller is supposed to be halted. The controller ends up with
         * a fatal fault, and is then ripe for being properly reset.
         *
         * Special care is taken to only apply this if the device is behind
         * an iommu. Doing anything when there is no iommu is definitely
         * unsafe...
         */
        domain = iommu_get_domain_for_dev(dev);
        if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
            domain->type == IOMMU_DOMAIN_IDENTITY)
                return;

        xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

        /* Clear HSEIE so that faults do not get signaled */
        val = readl(&xhci->op_regs->command);
        val &= ~CMD_HSEIE;
        writel(val, &xhci->op_regs->command);

        /* Clear HSE (aka FATAL) */
        val = readl(&xhci->op_regs->status);
        val |= STS_FATAL;
        writel(val, &xhci->op_regs->status);

        /* Now zero the registers, and brace for impact */
        val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        if (upper_32_bits(val))
                xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
        val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if (upper_32_bits(val))
                xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

        intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
                      ARRAY_SIZE(xhci->run_regs->ir_set));

        for (i = 0; i < intrs; i++) {
                struct xhci_intr_reg __iomem *ir;

                ir = &xhci->run_regs->ir_set[i];
                val = xhci_read_64(xhci, &ir->erst_base);
                if (upper_32_bits(val))
                        xhci_write_64(xhci, 0, &ir->erst_base);
                val = xhci_read_64(xhci, &ir->erst_dequeue);
                if (upper_32_bits(val))
                        xhci_write_64(xhci, 0, &ir->erst_dequeue);
        }

        /* Wait for the fault to appear. It will be cleared on reset */
        err = xhci_handshake(&xhci->op_regs->status,
                             STS_FATAL, STS_FATAL,
                             XHCI_MAX_HALT_USEC);
        if (!err)
                xhci_info(xhci, "Fault detected\n");
}
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
        u32 iman;

        if (!ir || !ir->ir_set)
                return -EINVAL;

        iman = readl(&ir->ir_set->irq_pending);
        writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

        return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
        u32 iman;

        if (!ir || !ir->ir_set)
                return -EINVAL;

        iman = readl(&ir->ir_set->irq_pending);
        writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

        return 0;
}
/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
                                    u32 imod_interval)
{
        u32 imod;

        if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
                return -EINVAL;

        imod = readl(&ir->ir_set->irq_control);
        imod &= ~ER_IRQ_INTERVAL_MASK;
        imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
        writel(imod, &ir->ir_set->irq_control);

        return 0;
}
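
/*
 * Worked example for the conversion above: the moderation field counts in
 * 250 ns units, so imod_interval = 40000 ns programs 40000 / 250 = 160,
 * i.e. at most one interrupt every 40 us. The largest representable
 * interval is U16_MAX * 250 ns, roughly 16.4 ms, hence the -EINVAL check.
 */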
static void compliance_mode_recovery(struct timer_list *t)
{
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        struct xhci_hub *rhub;
        u32 temp;
        int i;

        xhci = from_timer(xhci, t, comp_mode_recovery_timer);
        rhub = &xhci->usb3_rhub;
        hcd = rhub->hcd;

        if (!hcd)
                return;

        for (i = 0; i < rhub->num_ports; i++) {
                temp = readl(rhub->ports[i]->addr);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                       "Compliance mode detected->port %d",
                                       i + 1);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                       "Attempting compliance mode recovery");

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);
                }
        }

        if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                          jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
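
/*
 * Note: the timer re-arms itself at the end of compliance_mode_recovery()
 * until every USB3 port has been observed in U0 (port_status_u0 becomes a
 * full bitmask), at which point the polling stops for good.
 */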
/*
 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
 * that causes ports behind that hardware to enter compliance mode sometimes.
 * The quirk creates a timer that polls the link state of each host
 * controller's port every 2 seconds and recovers it by issuing a Warm reset
 * if Compliance mode is detected, otherwise the port will become "dead" (no
 * device connections or disconnections will be detected anymore). Because no
 * status event is generated when entering compliance mode (per xhci spec),
 * this quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
        timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
                    0);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                       "Compliance mode recovery timer initialized");
}
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)
                return false;

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
                return false;

        if (strstr(dmi_product_name, "Z420") ||
            strstr(dmi_product_name, "Z620") ||
            strstr(dmi_product_name, "Z820") ||
            strstr(dmi_product_name, "Z1 Workstation"))
                return true;

        return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
        return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
        spin_lock_init(&xhci->lock);

        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                               "QUIRK: Not clearing Link TRB chain bits.");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                               "xHCI doesn't need link TRB QUIRK");
        }

        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

        /* Initializing Compliance Mode Recovery Data If Needed */
        if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }

        return retval;
}
/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
        struct xhci_interrupter *ir = xhci->interrupters[0];
        unsigned long flags;
        u32 temp;

        /*
         * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
         * Protect the short window before host is running with a lock
         */
        spin_lock_irqsave(&xhci->lock, flags);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_EIE);
        writel(temp, &xhci->op_regs->command);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
        xhci_enable_interrupter(ir);

        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                spin_unlock_irqrestore(&xhci->lock, flags);
                return -ENODEV;
        }

        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        spin_unlock_irqrestore(&xhci->lock, flags);

        return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_interrupter *ir = xhci->interrupters[0];

        /*
         * Start the xHCI host controller running only after the USB 2.0
         * roothub is set up.
         */
        hcd->uses_new_polling = 1;

        if (hcd->msi_enabled)
                ir->ip_autoclear = true;

        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

        temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
        temp_64 &= ERST_PTR_MASK;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

        xhci_set_interrupter_moderation(ir, xhci->imod_interval);

        if (xhci->quirks & XHCI_NEC_HOST) {
                struct xhci_command *command;

                command = xhci_alloc_command(xhci, false, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;

                ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
                                                TRB_TYPE(TRB_NEC_GET_FW));
                if (ret)
                        xhci_free_command(xhci, command);
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "Finished %s for main hcd", __func__);

        xhci_create_dbc_dev(xhci);

        xhci_debugfs_init(xhci);

        if (xhci_has_one_roothub(xhci))
                return xhci_run_finished(xhci);

        set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

        return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_interrupter *ir = xhci->interrupters[0];

        mutex_lock(&xhci->mutex);

        /* Only halt host and free memory after both hcds are removed */
        if (!usb_hcd_is_primary_hcd(hcd)) {
                mutex_unlock(&xhci->mutex);
                return;
        }

        xhci_remove_dbc_dev(xhci);

        spin_lock_irq(&xhci->lock);
        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        xhci_halt(xhci);
        xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
        spin_unlock_irq(&xhci->lock);

        /* Deleting Compliance Mode Recovery Timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
            (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                               "%s: compliance mode recovery timer deleted",
                               __func__);
        }

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "// Disabling event ring interrupts");
        temp = readl(&xhci->op_regs->status);
        writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
        xhci_disable_interrupter(ir);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
        xhci_mem_cleanup(xhci);
        xhci_debugfs_exit(xhci);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "xhci_stop completed - status = %x",
                       readl(&xhci->op_regs->status));
        mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

        /* Don't poll the roothubs after shutdown. */
        xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);

        if (xhci->shared_hcd) {
                clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
                del_timer_sync(&xhci->shared_hcd->rh_timer);
        }

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);

        /*
         * Workaround for spurious wakeups at shutdown with HSW, and for boot
         * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
         */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
            xhci->quirks & XHCI_RESET_TO_DEFAULT)
                xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

        spin_unlock_irq(&xhci->lock);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "xhci_shutdown completed - status = %x",
                       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        struct xhci_interrupter *ir;
        unsigned int i;

        xhci->s3.command = readl(&xhci->op_regs->command);
        xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

        /* save both primary and all secondary interrupters */
        /* fixme, should we lock to prevent race with remove secondary interrupter? */
        for (i = 0; i < xhci->max_interrupters; i++) {
                ir = xhci->interrupters[i];
                if (!ir)
                        continue;

                ir->s3_erst_size = readl(&ir->ir_set->erst_size);
                ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
                ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
                ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
                ir->s3_irq_control = readl(&ir->ir_set->irq_control);
        }
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        struct xhci_interrupter *ir;
        unsigned int i;

        writel(xhci->s3.command, &xhci->op_regs->command);
        writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

        /* FIXME should we lock to protect against freeing of interrupters */
        for (i = 0; i < xhci->max_interrupters; i++) {
                ir = xhci->interrupters[i];
                if (!ir)
                        continue;

                writel(ir->s3_erst_size, &ir->ir_set->erst_size);
                xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
                xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
                writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
                writel(ir->s3_irq_control, &ir->ir_set->irq_control);
        }
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                       xhci->cmd_ring->dequeue) &
                  (u64) ~CMD_RING_RSVD_BITS) |
                 xhci->cmd_ring->cycle_state;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                       "// Setting command ring address to 0x%llx",
                       (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
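
/*
 * The composition above preserves the controller's reserved/control bits
 * (CMD_RING_RSVD_BITS, the low bits of CRCR), ORs in the 64-byte-aligned
 * dequeue TRB address, and sets bit 0 to the ring's current cycle state.
 */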
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        xhci_initialize_ring_info(ring, 1);
        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}
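
/*
 * Note: the loop above zeroes every TRB except the last one in each segment,
 * which is the Link TRB; that one keeps its link pointer and only has its
 * cycle bit cleared, so the ring stays stitched together.
 */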
/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
                                       struct xhci_hub *rhub,
                                       bool do_wakeup)
{
        unsigned long flags;
        u32 t1, t2, portsc;
        int i;

        spin_lock_irqsave(&xhci->lock, flags);

        for (i = 0; i < rhub->num_ports; i++) {
                portsc = readl(rhub->ports[i]->addr);
                t1 = xhci_port_state_to_neutral(portsc);
                t2 = t1;

                /* clear wake bits if do_wakeup is not set */
                if (!do_wakeup)
                        t2 &= ~PORT_WAKE_BITS;

                /* Don't touch csc bit if connected or connect change is set */
                if (!(portsc & (PORT_CSC | PORT_CONNECT)))
                        t2 |= PORT_CSC;

                if (t1 != t2) {
                        writel(t2, rhub->ports[i]->addr);
                        xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
                                 rhub->hcd->self.busnum, i + 1, portsc, t2);
                }
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
}
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
        struct xhci_port **ports;
        int port_index;
        u32 status;
        u32 portsc;

        status = readl(&xhci->op_regs->status);
        if (status & STS_EINT)
                return true;
        /*
         * Checking STS_EINT is not enough as there is a lag between a change
         * bit being set and the Port Status Change Event that it generated
         * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
         */
        port_index = xhci->usb2_rhub.num_ports;
        ports = xhci->usb2_rhub.ports;
        while (port_index--) {
                portsc = readl(ports[port_index]->addr);
                if (portsc & PORT_CHANGE_MASK ||
                    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
                        return true;
        }
        port_index = xhci->usb3_rhub.num_ports;
        ports = xhci->usb3_rhub.ports;
        while (port_index--) {
                portsc = readl(ports[port_index]->addr);
                if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
                    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
                        return true;
        }
        return false;
}
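
/*
 * The USB3 loop above additionally checks PORT_CAS (Cold Attach Status),
 * which only exists for SuperSpeed ports; that is why the USB2 loop checks
 * PORT_CHANGE_MASK alone.
 */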
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int rc = 0;
        unsigned int delay = XHCI_MAX_HALT_USEC * 2;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;
        u32 res;

        if (!hcd->state)
                return 0;

        if (hcd->state != HC_STATE_SUSPENDED ||
            (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
                return -EINVAL;

        /* Clear root port wake on bits if wakeup not allowed. */
        xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
        xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

        if (!HCD_HW_ACCESSIBLE(hcd))
                return 0;

        xhci_dbc_suspend(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
                 __func__, hcd->self.busnum);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
        if (xhci->shared_hcd) {
                clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
                del_timer_sync(&xhci->shared_hcd->rh_timer);
        }

        if (xhci->quirks & XHCI_SUSPEND_DELAY)
                usleep_range(1000, 1500);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        if (xhci->shared_hcd)
                clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped, assuming that port suspend has already been done */
        /* step 2: clear Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command &= ~CMD_RUN;
        writel(command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (xhci_handshake(&xhci->op_regs->status,
                           STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
        xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
                           STS_SAVE, 0, 20 * 1000)) {
                /*
                 * AMD SNPS xHC 3.0 occasionally does not clear the
                 * SSS bit of USBSTS, and when the driver polls to see
                 * if the xHC clears BIT(8), that never happens, so the
                 * driver assumes the controller is not responding and
                 * times out. To work around this, it's good to check
                 * if SRE and HCE bits are not set (as per xhci
                 * Section 5.4.2) and bypass the timeout.
                 */
                res = readl(&xhci->op_regs->status);
                if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
                    (((res & STS_SRE) == 0) &&
                     ((res & STS_HCE) == 0))) {
                        xhci->broken_suspend = 1;
                } else {
                        xhci_warn(xhci, "WARN: xHC save state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Deleting Compliance Mode Recovery Timer because the xHCI Host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
            (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                               "%s: compliance mode recovery timer deleted",
                               __func__);
        }

        return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);
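
/*
 * Note on step 4 in xhci_suspend(): setting CMD_CSS asks the controller to
 * save its internal state; completion is signalled by the SSS bit (STS_SAVE)
 * clearing in USBSTS, which is what the 20 ms handshake above waits for.
 */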
  837. /*
  838. * start xHC (not bus-specific)
  839. *
  840. * This is called when the machine transition from S3/S4 mode.
  841. *
  842. */
  843. int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
  844. {
  845. bool hibernated = (msg.event == PM_EVENT_RESTORE);
  846. u32 command, temp = 0;
  847. struct usb_hcd *hcd = xhci_to_hcd(xhci);
  848. int retval = 0;
  849. bool comp_timer_running = false;
  850. bool pending_portevent = false;
  851. bool suspended_usb3_devs = false;
  852. bool reinit_xhc = false;
  853. if (!hcd->state)
  854. return 0;
  855. /* Wait a bit if either of the roothubs need to settle from the
  856. * transition into bus suspend.
  857. */
  858. if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
  859. time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
  860. msleep(100);
  861. set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
  862. if (xhci->shared_hcd)
  863. set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
  864. spin_lock_irq(&xhci->lock);
  865. if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
  866. reinit_xhc = true;
  867. if (!reinit_xhc) {
  868. /*
  869. * Some controllers might lose power during suspend, so wait
  870. * for controller not ready bit to clear, just as in xHC init.
  871. */
  872. retval = xhci_handshake(&xhci->op_regs->status,
  873. STS_CNR, 0, 10 * 1000 * 1000);
  874. if (retval) {
  875. xhci_warn(xhci, "Controller not ready at resume %d\n",
  876. retval);
  877. spin_unlock_irq(&xhci->lock);
  878. return retval;
  879. }
  880. /* step 1: restore register */
  881. xhci_restore_registers(xhci);
  882. /* step 2: initialize command ring buffer */
  883. xhci_set_cmd_ring_deq(xhci);
  884. /* step 3: restore state and start state*/
  885. /* step 3: set CRS flag */
  886. command = readl(&xhci->op_regs->command);
  887. command |= CMD_CRS;
  888. writel(command, &xhci->op_regs->command);
  889. /*
  890. * Some controllers take up to 55+ ms to complete the controller
  891. * restore so setting the timeout to 100ms. Xhci specification
  892. * doesn't mention any timeout value.
  893. */
  894. if (xhci_handshake(&xhci->op_regs->status,
  895. STS_RESTORE, 0, 100 * 1000)) {
  896. xhci_warn(xhci, "WARN: xHC restore state timeout\n");
  897. spin_unlock_irq(&xhci->lock);
  898. return -ETIMEDOUT;
  899. }
  900. }
	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		reinit_xhc = true;
		if (!xhci->broken_suspend)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
	}

	if (reinit_xhc) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}
		if (retval)
			return retval;

		/*
		 * Resume roothubs unconditionally as PORTSC change bits are not
		 * immediately visible after xHC reset
		 */
		hcd->state = HC_STATE_SUSPENDED;

		if (xhci->shared_hcd) {
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
		usb_hcd_resume_root_hub(hcd);

		goto done;
	}
	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed, so give them that chance
		 * if there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent &&
		    msg.event == PM_EVENT_AUTO_RESUME) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
done:
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: ports may
	 * suffer the compliance mode issue again, regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}
				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}
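
/*
 * A worked reading of the loop above (illustrative, assuming a SuperSpeed
 * device, so trb_size = TRB_CACHE_SIZE_SS): len accumulates the total sg
 * length while tail_sg trails the cursor, so once i > trb_size - 2 the
 * updated len is the number of bytes held in the most recent trb_size - 1
 * entries. If any such window holds less than one wMaxPacketSize of data,
 * a controller that caches only trb_size TRBs could starve mid-packet, so
 * the transfer is bounced through one contiguous temporary buffer instead.
 */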
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}
/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT);
 * in that case the actual data is copied into the TRB address register. This
 * is limited to transfers of up to 8 bytes on output endpoints of any kind
 * with wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB
 * per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}
/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
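
/*
 * Worked examples of the index formula above (for illustration only):
 *   ep 1 OUT (0x01): index = (1 * 2) + 0 - 1 = 1
 *   ep 1 IN  (0x81): index = (1 * 2) + 1 - 1 = 2
 *   ep 0 (control):  index = (0 * 2)         = 0
 * so the 31 possible endpoint contexts map to indices 0..30.
 */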
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}
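
/*
 * For illustration: index 1 -> number = DIV_ROUND_UP(1, 2) = 1, odd index
 * -> USB_DIR_OUT, giving address 0x01; index 2 -> number = 1, even index
 * -> USB_DIR_IN, giving address 0x81. Index 0 maps back to 0x80, i.e. the
 * default control endpoint, for which the direction bit is meaningless.
 */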
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {

	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);

	if (hw_max_packet_size == max_packet_size)
		return 0;

	switch (max_packet_size) {
	case 8: case 16: case 32: case 64: case 9:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = vdev->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			break;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, vdev->udev, command,
					      true, false);
		/* Clean up the input context for later use by bandwidth functions */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
		break;
	default:
		dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
			max_packet_size);
		return -EINVAL;
	}

	kfree(command->completion);
	kfree(command);

	return ret;
}
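
/*
 * For illustration: a full-speed device may report bMaxPacketSize0 = 8 in
 * the provisional descriptor read but 64 once the full descriptor is parsed.
 * The xHC's endpoint 0 context still holds the old value, so the function
 * above patches the input context and issues an Evaluate Context command to
 * bring the hardware back in sync. (The odd-looking "case 9" presumably
 * accepts the raw SuperSpeed encoding, where bMaxPacketSize0 holds an
 * exponent: 2^9 = 512 bytes.)
 */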
/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv	*urb_priv;
	int num_tds;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_check_args(hcd, urb->dev, urb->ep,
			      true, true, __func__);
	if (ret <= 0) {
		ret = ret ? ret : -EINVAL;
		goto free_priv;
	}

	slot_id = urb->dev->slot_id;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		ret = -ENODEV;
		goto free_priv;
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
						 slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are a few cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers time out on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued. If it has, make sure none of the ring-related pointers in
	 * this URB's private data are touched, such as td_list; otherwise we
	 * would overwrite freed data.
	 */
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		/* TD can already be on cancelled list if ep halted on it */
		if (list_empty(&td->cancelled_td_list)) {
			td->cancel_status = TD_DIRTY;
			list_add_tail(&td->cancelled_td_list,
				      &ep->cancelled_td_list);
		}
	}

	/* These completion handlers will sort out cancelled TDs for us */
	if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
		xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
				urb->dev->slot_id, ep_index, ep->ep_state);
		goto done;
	}

	/* In this case no commands are pending but the endpoint is stopped */
	if (ep->ep_state & EP_CLEARING_TT) {
		/* and cancelled TDs can be given back right away */
		xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
				urb->dev->slot_id, ep_index, ep->ep_state);
		xhci_process_cancelled_tds(ep);
	} else {
		/* Otherwise, queue a new Stop Endpoint command */
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->stop_time = jiffies;
		ep->ep_state |= EP_STOP_CMD_PENDING;
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		       struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				  __func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		      struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
	    !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
			  __func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
			__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
	trace_xhci_add_endpoint(ep_ctx);

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_add_endpoint);
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; i++) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
		ret = -ETIME;
		break;
	case COMP_RESOURCE_ERROR:
		dev_warn(&udev->dev,
			 "Not enough host controller resources for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BANDWIDTH_ERROR:
	case COMP_SECONDARY_BANDWIDTH_ERROR:
		dev_warn(&udev->dev,
			 "Not enough bandwidth for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERROR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for endpoint configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful Endpoint Configure command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
			 *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
		ret = -ETIME;
		break;
	case COMP_PARAMETER_ERROR:
		dev_warn(&udev->dev,
			 "WARN: xHCI driver setup invalid evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_SLOT_NOT_ENABLED_ERROR:
		dev_warn(&udev->dev,
			 "WARN: slot not enabled for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CONTEXT_STATE_ERROR:
		dev_warn(&udev->dev,
			 "WARN: invalid context state for evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for evaluate context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Successful evaluate context command");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
			 *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1). The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added. Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
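
/*
 * For illustration: with add_flags = 0b11100 and drop_flags = 0b00100
 * (ep context index 1 changed, indices 2 and 3 newly added), shifting out
 * the slot and ep0 bits gives valid_add_flags = 0b111 and valid_drop_flags
 * = 0b001, so hweight32(0b111) - hweight32(0b001) = 3 - 1 = 2 new endpoints.
 */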
static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes. We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes, because otherwise we could
 * oversubscribe the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding %u ep ctxs, %u now active.", added_eps,
			xhci->num_active_eps);
	return 0;
}
/*
 * The configure endpoint command failed in the xHC for some other reason, so
 * we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Removing %u failed ep ctxs, %u now active.",
			num_failed_eps,
			xhci->num_active_eps);
}
/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_input_control_ctx *ctrl_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Removing %u dropped ep ctxs, %u now active.",
				num_dropped_eps,
				xhci->num_active_eps);
}
static unsigned int xhci_get_block_size(struct usb_device *udev)
{
	switch (udev->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		return FS_BLOCK;
	case USB_SPEED_HIGH:
		return HS_BLOCK;
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		return SS_BLOCK;
	case USB_SPEED_UNKNOWN:
	default:
		/* Should never happen */
		return 1;
	}
}

static unsigned int
xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
{
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
		return FS_OVERHEAD;
	return HS_OVERHEAD;
}
/* If we are changing a LS/FS device under a HS hub,
 * make sure (if we are activating a new TT) that the HS bus has enough
 * bandwidth for this new TT.
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added. Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}
static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}
/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval. The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling. This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
 * case scenario. Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval. In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in. For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so. The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit. This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval. We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval. Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->rhub_port->hw_portnum + 1);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->rhub_port->hw_portnum + 1);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15. We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain. Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Final bandwidth: %u, Limit: %u, Reserved: %u, "
			"Available: %u percent",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /
			max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}
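
/*
 * A worked example of the carry logic above (illustrative numbers only):
 * suppose interval 1 has 3 packets and all higher intervals are empty.
 * Interval 1 offers 1 << 2 = 4 scheduling opportunities, so
 * packets_transmitted = 3 >> 2 = 0 and all 3 packets carry into interval 2
 * doubled: packets_remaining = 6 against 8 opportunities, then 12 against
 * 16, and so on. The ratio stays 3/4, nothing ever distributes evenly, and
 * the final packets_remaining > 0 check after interval 15 charges one extra
 * (overhead + packet_size), i.e. it over-schedules as if such a packet ran
 * in every microframe.
 */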
static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
		ep_type != ISOC_IN_EP &&
		ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
			 (SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}
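
/*
 * For illustration (symbolic, since the SS_* constants are defined
 * elsewhere): an endpoint with mult = 1, num_packets = 2 and
 * ep_interval = 2 consumes
 *   DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 1 << 2)
 * blocks, i.e. the per-service cost is amortized over the 4 microframes
 * between service opportunities. Only an interval-0 endpoint pays its full
 * cost in every microframe.
 */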
static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	default:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}
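
/*
 * Note on the normalization above (for illustration): ep_interval is stored
 * as a power-of-two exponent in microframes. A full-speed interrupt endpoint
 * with ep_interval = 5 services every 2^5 = 32 microframes, i.e. every
 * 2^(5-3) = 4 frames, so it lands in interval_bw[2]; a high-speed endpoint
 * already counts in microframes and needs no adjustment.
 */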
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed >= USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	default:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}
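
/*
 * Update the root port's count of active TTs when this device's TT goes from
 * idle to having active periodic endpoints (or back again), charging or
 * releasing the schedule overhead an active TT costs on that root port.
 */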
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;

	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}
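
/*
 * Tentatively apply the endpoint changes described by the input context to
 * the software bandwidth tables, then verify the result.  If the new schedule
 * does not fit, every change is rolled back from the saved per-endpoint
 * copies and -ENOMEM is returned.
 */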
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	unsigned long flags;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	if (!command)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ESHUTDOWN;
	}

	virt_dev = xhci->devs[udev->slot_id];

	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
	    xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);

	trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
	trace_xhci_configure_endpoint(slot_ctx);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, command,
				command->in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"FIXME allocate a new ring segment");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	wait_for_completion(command->completion);

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev,
				&command->status);
	else
		ret = xhci_evaluate_context_result(xhci, udev,
				&command->status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}
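
/*
 * If an endpoint that is being dropped or reconfigured still has streams set
 * up, warn and free the stream data; the new alt setting starts without them.
 */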
static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
		struct xhci_virt_device *vdev, int i)
{
	struct xhci_virt_ep *ep = &vdev->eps[i];

	if (ep->ep_state & EP_HAS_STREAMS) {
		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
				xhci_get_endpoint_address(i));
		xhci_free_stream_info(xhci, ep->stream_info);
		ep->stream_info = NULL;
		ep->ep_state &= ~EP_HAS_STREAMS;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_command *command;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_REMOVING))
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	command->in_ctx = virt_dev->in_ctx;

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -ENOMEM;
		goto command_cleanup;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
	    ctrl_ctx->drop_flags == 0) {
		ret = 0;
		goto command_cleanup;
	}
	/* Fix up Context Entries field.  Minimum value is EP0 == BIT(1). */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	for (i = 31; i >= 1; i--) {
		__le32 le32 = cpu_to_le32(BIT(i));

		if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
		    || (ctrl_ctx->add_flags & le32) || i == 1) {
			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
			break;
		}
	}

	ret = xhci_configure_endpoint(xhci, udev, command,
			false, false);
	if (ret)
		/* Caller should call reset_bandwidth() */
		goto command_cleanup;

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; i++) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
			xhci_free_endpoint_ring(xhci, virt_dev, i);
			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; i++) {
		if (!virt_dev->eps[i].new_ring)
			continue;

		/* Only free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring)
			xhci_free_endpoint_ring(xhci, virt_dev, i);

		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
	}
command_cleanup:
	kfree(command->completion);
	kfree(command);

	return ret;
}
EXPORT_SYMBOL_GPL(xhci_check_bandwidth);

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; i++) {
		if (virt_dev->eps[i].new_ring) {
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
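
/*
 * Fill in the input control context's add/drop flags for a configure endpoint
 * command and copy the current slot context into the input context, making
 * sure the slot context itself is always marked as updated.
 */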
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
}
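
/*
 * Called by the USB core when an endpoint is disabled.  Wait out any pending
 * hub TT clearing work before dropping the hcpriv back-pointer, so the TT
 * buffer code cannot dereference a stale endpoint.
 */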
static void xhci_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *host_ep)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_virt_ep *ep;
	struct usb_device *udev;
	unsigned long flags;
	unsigned int ep_index;

	xhci = hcd_to_xhci(hcd);
rescan:
	spin_lock_irqsave(&xhci->lock, flags);

	udev = (struct usb_device *)host_ep->hcpriv;
	if (!udev || !udev->slot_id)
		goto done;

	vdev = xhci->devs[udev->slot_id];
	if (!vdev)
		goto done;

	ep_index = xhci_get_endpoint_index(&host_ep->desc);
	ep = &vdev->eps[ep_index];

	/* wait for hub_tt_work to finish clearing hub TT */
	if (ep->ep_state & EP_CLEARING_TT) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	}

	if (ep->ep_state)
		xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
			 ep->ep_state);
done:
	host_ep->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Called after the USB core issues a clear halt control message.
 * The host side of the halt should already be cleared by a reset endpoint
 * command issued when the STALL event was received.
 *
 * The reset endpoint command may only be issued to endpoints in the halted
 * state.  For software that wishes to reset the data toggle or sequence number
 * of an endpoint that isn't in the halted state, this function will issue a
 * configure endpoint command with the Drop and Add bits set for the target
 * endpoint.  Refer to the additional note in xHCI specification section 4.6.8.
 *
 * vdev may be lost due to xHC restore error and re-initialization during S3/S4
 * resume.  A new vdev will be allocated later by xhci_discover_or_reset_device()
 */
static void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *host_ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	struct xhci_virt_device *vdev;
	struct xhci_virt_ep *ep;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_command *stop_cmd, *cfg_cmd;
	unsigned int ep_index;
	unsigned long flags;
	u32 ep_flag;
	int err;

	xhci = hcd_to_xhci(hcd);
	ep_index = xhci_get_endpoint_index(&host_ep->desc);

	/*
	 * The USB core assumes a max packet value for ep0 on FS devices until
	 * the real value is read from the descriptor.  The core resets ep0 if
	 * the values mismatch; reconfigure the xhci ep0 endpoint context here
	 * in that case.
	 */
	if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
		udev = container_of(host_ep, struct usb_device, ep0);
		if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
			return;

		vdev = xhci->devs[udev->slot_id];
		if (!vdev || vdev->udev != udev)
			return;

		xhci_check_ep0_maxpacket(xhci, vdev);

		/* Nothing else should be done here for ep0 during ep reset */
		return;
	}

	if (!host_ep->hcpriv)
		return;
	udev = (struct usb_device *) host_ep->hcpriv;
	vdev = xhci->devs[udev->slot_id];

	if (!udev->slot_id || !vdev)
		return;

	ep = &vdev->eps[ep_index];

	/* Bail out if toggle is already being cleared by an endpoint reset */
	spin_lock_irqsave(&xhci->lock, flags);
	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	/* Only interrupt and bulk endpoints use data toggle; USB 2.0 spec 5.5.4 */
	if (usb_endpoint_xfer_control(&host_ep->desc) ||
	    usb_endpoint_xfer_isoc(&host_ep->desc))
		return;

	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);

	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
		return;

	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
	if (!stop_cmd)
		return;

	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
	if (!cfg_cmd)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);

	/* block queuing new trbs and ringing ep doorbell */
	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;

	/*
	 * Make sure the endpoint ring is empty before resetting the toggle/seq.
	 * The driver is required to synchronously cancel all transfer requests.
	 * Stop the endpoint to force the xHC to update the output context.
	 */
	if (!list_empty(&ep->ring->td_list)) {
		dev_err(&udev->dev, "EP not empty, refuse reset\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		goto cleanup;
	}

	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
				       ep_index, 0);
	if (err < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
				__func__, err);
		goto cleanup;
	}

	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(stop_cmd->completion);

	spin_lock_irqsave(&xhci->lock, flags);

	/* config ep command clears toggle if add and drop ep flags are set */
	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		goto cleanup;
	}

	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
					   ctrl_ctx, ep_flag, ep_flag);
	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);

	err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
				      udev->slot_id, false);
	if (err < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cfg_cmd);
		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
				__func__, err);
		goto cleanup;
	}

	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cfg_cmd->completion);

	xhci_free_command(xhci, cfg_cmd);
cleanup:
	xhci_free_command(xhci, stop_cmd);
	spin_lock_irqsave(&xhci->lock, flags);
	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
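
/*
 * Validate that a single endpoint is a sane target for stream allocation:
 * it must exist, its companion descriptor must advertise stream support,
 * streams must not already be set up on it, and no URBs may be pending.
 */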
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret ? ret : -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
	    ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
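
/*
 * Round the requested number of streams up to a power-of-two stream context
 * array size, and clamp both values to what the controller's MaxPSASize
 * field says the hardware can support.
 */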
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams + 1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
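
/*
 * Build the bitmask of endpoints whose streams are to be freed.  Returns 0
 * (meaning "nothing to do") if any endpoint is already having its streams
 * freed, or never had streams to begin with.
 */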
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
		    !(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned int max_packet;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	/* MaxPSASize value 0 (2 streams) means streams are not supported */
	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
			HCC_MAX_PSA(xhci->hcc_params) < 4) {
		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
		return -ENOSYS;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		max_packet = usb_endpoint_maxp(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams,
				max_packet, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
			 udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
	}
	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, %u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
		struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does not exist. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	if (xhci->quirks & XHCI_ETRON_HOST) {
		/*
		 * Obtain a new device slot to inform the xHCI host that
		 * the USB device has been reset.
		 */
		ret = xhci_disable_slot(xhci, udev->slot_id);
		xhci_free_virt_device(xhci, udev->slot_id);
		if (!ret) {
			ret = xhci_alloc_dev(hcd, udev);
			if (ret == 1)
				ret = 0;
			else
				ret = -EINVAL;
		}
		return ret;
	}

	trace_xhci_discover_or_reset_device(slot_ctx);

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	wait_for_completion(reset_device_cmd->completion);

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout waiting for reset device command\n");
		ret = -ETIME;
		goto command_cleanup;
	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for reset device command.\n",
				ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free the rings. */
	for (i = 1; i < 31; i++) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_free_endpoint_ring(xhci, virt_dev, i);
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
	virt_dev->flags = 0;
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int i, ret;

	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow the controller to runtime
	 * suspend if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_free_dev(slot_ctx);

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; i++)
		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
	virt_dev->udev = NULL;
	xhci_disable_slot(xhci, udev->slot_id);

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_free_virt_device(xhci, udev->slot_id);
	spin_unlock_irqrestore(&xhci->lock, flags);
}
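
/*
 * Issue a Disable Slot command and wait for it to complete.  This only gives
 * the slot ID back to the hardware; the caller remains responsible for
 * freeing the virt device structure.
 */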
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
	struct xhci_command *command;
	unsigned long flags;
	u32 state;
	int ret;

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	xhci_debugfs_remove_slot(xhci, slot_id);

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return -ENODEV;
	}

	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);

	if (command->status != COMP_SUCCESS)
		xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
				slot_id, command->status);

	xhci_free_command(xhci, command);

	return 0;
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: %u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret, slot_id;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return 0;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		xhci_free_command(xhci, command);
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = command->slot_id;

	if (!slot_id || command->status != COMP_SUCCESS) {
		xhci_err(xhci, "Error while assigning device slot ID: %s\n",
				xhci_trb_comp_code_string(command->status));
		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
				HCS_MAX_SLOTS(
					readl(&xhci->cap_regs->hcs_params1)));
		xhci_free_command(xhci, command);
		return 0;
	}

	xhci_free_command(xhci, command);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_alloc_dev(slot_ctx);

	udev->slot_id = slot_id;

	xhci_debugfs_create_slot(xhci, slot_id);

	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	xhci_disable_slot(xhci, udev->slot_id);
	xhci_free_virt_device(xhci, udev->slot_id);

	return 0;
}
  3576. /**
  3577. * xhci_setup_device - issues an Address Device command to assign a unique
  3578. * USB bus address.
  3579. * @hcd: USB host controller data structure.
  3580. * @udev: USB dev structure representing the connected device.
  3581. * @setup: Enum specifying setup mode: address only or with context.
  3582. * @timeout_ms: Max wait time (ms) for the command operation to complete.
  3583. *
  3584. * Return: 0 if successful; otherwise, negative error code.
  3585. */
  3586. static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
  3587. enum xhci_setup_dev setup, unsigned int timeout_ms)
  3588. {
  3589. const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
  3590. unsigned long flags;
  3591. struct xhci_virt_device *virt_dev;
  3592. int ret = 0;
  3593. struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  3594. struct xhci_slot_ctx *slot_ctx;
  3595. struct xhci_input_control_ctx *ctrl_ctx;
  3596. u64 temp_64;
  3597. struct xhci_command *command = NULL;
	mutex_lock(&xhci->mutex);

	if (xhci->xhc_state) {	/* dying, removing or halted */
		ret = -ESHUTDOWN;
		goto out;
	}

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		ret = -EINVAL;
		goto out;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
			udev->slot_id);
		ret = -EINVAL;
		goto out;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_setup_device_slot(slot_ctx);

	if (setup == SETUP_CONTEXT_ONLY) {
		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
		    SLOT_STATE_DEFAULT) {
			xhci_dbg(xhci, "Slot already in default state\n");
			goto out;
		}
	}

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command) {
		ret = -ENOMEM;
		goto out;
	}

	command->in_ctx = virt_dev->in_ctx;
	command->timeout_ms = timeout_ms;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -EINVAL;
		goto out;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);

	trace_xhci_address_ctrl_ctx(ctrl_ctx);
	spin_lock_irqsave(&xhci->lock, flags);
	trace_xhci_setup_device(virt_dev);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		goto out;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	wait_for_completion(command->completion);

	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout."
	 */
	switch (command->status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
		ret = -ETIME;
		break;
	case COMP_CONTEXT_STATE_ERROR:
	case COMP_SLOT_NOT_ENABLED_ERROR:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
			 act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);

		mutex_unlock(&xhci->mutex);
		ret = xhci_disable_slot(xhci, udev->slot_id);
		xhci_free_virt_device(xhci, udev->slot_id);
		if (!ret) {
			if (xhci_alloc_dev(hcd, udev) == 1)
				xhci_setup_addressable_virt_dev(xhci, udev);
		}
		kfree(command->completion);
		kfree(command);
		return -EPROTO;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			       "Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
			 act, command->status);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		"Slot ID %d dcbaa entry @%p = %#016llx",
		udev->slot_id,
		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		(unsigned long long)
		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		       "Internal device address = %d",
		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
	mutex_unlock(&xhci->mutex);
	if (command) {
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}
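
/*
 * Issue an Address Device command for the slot; the xHC assigns the device
 * its USB address and moves the slot to the Addressed state.
 */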
static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
			       unsigned int timeout_ms)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
}
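
/*
 * Same command, but with the Block Set Address Request (BSR) flag set: the
 * slot context is programmed and the slot moves to the Default state, while
 * no USB address is assigned yet.
 */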
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
				 XHCI_CMD_DEFAULT_TIMEOUT);
}

/*
 * Translate the port index into the real index in the HW port status
 * registers: calculate the offset between the port's PORTSC register and the
 * port register base, then divide by the number of registers per port. The
 * raw port number is 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hub *rhub;

	rhub = xhci_get_rhub(hcd);
	return rhub->ports[port1 - 1]->hw_portnum + 1;
}

/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);

	virt_dev = xhci->devs[udev->slot_id];

	/*
	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
	 * xHC was re-initialized. Exit latency will be set later after
	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated.
	 */
	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, command);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, command);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
	slot_ctx->dev_state = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	xhci_free_command(xhci, command);

	return ret;
}

#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
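
/*
 * Each 4-bit BESL index above maps to an exit latency in microseconds:
 * index 0 encodes 125 us and index 15 encodes 10 ms. A deeper (higher)
 * index allows more power saving at the cost of a longer resume time.
 */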

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}

/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
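
/*
 * Enable or disable USB2 hardware LPM (L1) for a device attached directly to
 * a root hub port, programming PORTPMSC (and PORTHLPMC on BESL-capable hosts)
 * for that port.
 */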
static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	struct xhci_port **ports;
	__le32 __iomem	*pm_addr, *hlpm_addr;
	u32		pm_val, hlpm_val, field;
	unsigned int	port_num;
	unsigned long	flags;
	int		hird, exit_latency;
	int		ret;

	if (xhci->quirks & XHCI_HW_LPM_DISABLE)
		return -EPERM;

	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	ports = xhci->usb2_rhub.ports;
	port_num = udev->portnum - 1;
	pm_addr = ports[port_num]->addr + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = ports[port_num]->addr + PORTHLPMC;

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num + 1);

	if (enable) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* if device doesn't have a preferred BESL value use a
			 * default one which works with mixed HIRD and BESL
			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
			 */
			field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_change_max_exit_latency(xhci, udev, 0);
			readl_poll_timeout(ports[port_num]->addr, pm_val,
					   (pm_val & PORT_PLS_MASK) == XDEV_U0,
					   100, 10000);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}
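
/*
 * Called by the usb core once a device has been connected and addressed;
 * records the USB4 tunneling state for USB3 root-port devices and decides
 * whether USB2 hardware LPM can be used for this device.
 */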
static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
	struct xhci_port *port;
	u32 capability;

	/* Check if USB3 device at root port is tunneled over USB4 */
	if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
		port = xhci->usb3_rhub.ports[udev->portnum - 1];

		udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
		if (udev->tunnel_mode == USB_LINK_UNKNOWN)
			dev_dbg(&udev->dev, "link tunnel state unknown\n");
		else if (udev->tunnel_mode == USB_LINK_TUNNELED)
			dev_dbg(&udev->dev, "tunneled over USB4 link\n");
		else if (udev->tunnel_mode == USB_LINK_NATIVE)
			dev_dbg(&udev->dev, "native USB 3.x link\n");
		return 0;
	}

	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
		return 0;

	/* So far we only support LPM for non-hub devices connected to a root hub port */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	port = xhci->usb2_rhub.ports[udev->portnum - 1];
	capability = port->port_cap->protocol_caps;

	if (capability & XHCI_HLC) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (capability & XHCI_BLC)
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}

/*---------------------- USB 3.0 Link PM functions ------------------------*/

/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
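/* For example, an endpoint with bInterval 4 has a service interval of
 * 2^3 * 125 us = 1 ms, i.e. 1,000,000 ns.
 */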
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}

static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
			 __func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu us\n",
			state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long PEL %llu us\n",
			state_name, pel);
	return USB3_LPM_DISABLED;
}

/* The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static unsigned long long xhci_calculate_intel_u1_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
		fallthrough;
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	return timeout_ns;
}

/* Returns the hub-encoded U1 timeout value. */
static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	/* Prevent U1 if service interval is shorter than U1 exit latency */
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
			return USB3_LPM_DISABLED;
		}
	}

	if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
	else
		timeout_ns = udev->u1_params.sel;

	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns = 1;
	else
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %llu us\n",
		timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}

/* The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static unsigned long long xhci_calculate_intel_u2_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	return timeout_ns;
}

/* Returns the hub-encoded U2 timeout value. */
static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	/* Prevent U2 if service interval is shorter than U2 exit latency */
	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
			return USB3_LPM_DISABLED;
		}
	}

	if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
	else
		timeout_ns = udev->u2_params.sel;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %llu * 256 us\n",
		timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
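
/*
 * Dispatch to the U1 or U2 per-endpoint timeout calculation for the given
 * link state; any other state is treated as LPM-disabled.
 */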
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1)
		return xhci_calculate_u1_timeout(xhci, udev, desc);
	else if (state == USB3_LPM_U2)
		return xhci_calculate_u2_timeout(xhci, udev, desc);

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, and
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, then we will disable LPM
	 * for this device, so stop searching any further.
	 */
	if (alt_timeout == USB3_LPM_DISABLED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
					&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}
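
/*
 * Count how many hubs sit between the device and the host: the roothub is
 * tier 1, a device on a roothub port is tier 2, and so on. Some hosts only
 * support U1/U2 up to a limited tier.
 */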
static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent = udev->parent;
	int tier = 1; /* roothub is tier1 */

	while (parent) {
		parent = parent->parent;
		tier++;
	}

	if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
		goto fail;
	if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
		goto fail;

	return 0;
fail:
	dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
			tier);
	return -E2BIG;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
					state_name, driver->name);
				timeout = xhci_get_timeout_no_hub_lpm(udev,
								      state);
				if (timeout == USB3_LPM_DISABLED)
					return timeout;
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}
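
/*
 * Work out the Max Exit Latency (in microseconds) the host must budget for,
 * given which U-state is changing and the hub-encoded timeout chosen for it.
 */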
static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	mel_us = max(u1_mel_us, u2_mel_us);

	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
			 mel_us);
		return -E2BIG;
	}
	return mel_us;
}

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	struct xhci_port *port;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return USB3_LPM_DISABLED;

	/* If connected to root port then check port can handle lpm */
	if (udev->parent && !udev->parent->parent) {
		port = xhci->usb3_rhub.ports[udev->portnum - 1];
		if (port->lpm_incapable)
			return USB3_LPM_DISABLED;
	}

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}
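
/*
 * Recalculate and program the host's Max Exit Latency budget for this device
 * with the given U-state treated as disabled.
 */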
static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd	*xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * refer to section 6.2.2: MTT should be 0 for a full speed hub,
	 * but it may already be set to 1 when setting up an xHCI virtual
	 * device, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}
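
/*
 * Fill in the bus- and root-hub-speed fields for the USB2 side of a shared
 * xHCI controller.
 */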
static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->usb2_rhub.hcd = hcd;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	/*
	 * The USB 2.0 roothub under xHCI has an integrated TT (rate matching
	 * hub), as opposed to having an OHCI/UHCI companion controller.
	 */
	hcd->has_tt = 1;
}

static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	unsigned int minor_rev;

	/*
	 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
	 * should return 0x31 for sbrn, or that the minor revision
	 * is a two digit BCD containing minor and sub-minor numbers.
	 * This was later clarified in xHCI 1.2.
	 *
	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
	 * minor revision set to 0x1 instead of 0x10.
	 */
	if (xhci->usb3_rhub.min_rev == 0x1)
		minor_rev = 1;
	else
		minor_rev = xhci->usb3_rhub.min_rev / 0x10;

	switch (minor_rev) {
	case 2:
		hcd->speed = HCD_USB32;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->rx_lanes = 2;
		hcd->self.root_hub->tx_lanes = 2;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
		break;
	case 1:
		hcd->speed = HCD_USB31;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
		break;
	}
	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
		  minor_rev, minor_rev ? "Enhanced " : "");

	xhci->usb3_rhub.hcd = hcd;
}
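
/*
 * Common setup shared by the xHCI glue drivers (PCI, platform, ...): map the
 * register sets, cache the capability registers, apply quirks, halt and reset
 * the controller, pick the DMA mask and initialize the HCD data structures.
 */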
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd		*xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device		*dev = hcd->self.sysdev;
	int			retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_hcd_init_usb3_data(xhci, hcd);
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->main_hcd = hcd;
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	/* xhci-plat or xhci-pci might have set max_interrupters already */
	if ((!xhci->max_interrupters) ||
	    xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
		xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci->quirks |= quirks;

	if (get_quirks)
		get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
	 * address memory pointers actually. So, this driver clears the AC64
	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	if (xhci_hcd_is_usb3(hcd))
		xhci_hcd_init_usb3_data(xhci, hcd);
	else
		xhci_hcd_init_usb2_data(xhci, hcd);

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
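
/*
 * Completion callback for a Clear TT Buffer request: clear the EP_CLEARING_TT
 * flag and ring the doorbell so rings held off while the TT was being cleared
 * can make progress again.
 */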
static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.unmap_urb_for_dma =	xhci_unmap_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete =	xhci_clear_tt_buffer_complete,
};
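
/*
 * Glue drivers copy the generic table above and override selected operations.
 * As a rough usage sketch (names here are illustrative, not taken from this
 * file), a platform glue driver might do:
 *
 *	static struct hc_driver my_xhci_hc_driver;
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.reset = my_xhci_setup,	 * hypothetical setup callback *
 *	};
 *	...
 *	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 */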
void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
		if (over->update_hub_device)
			drv->update_hub_device = over->update_hub_device;
		if (over->hub_control)
			drv->hub_control = over->hub_control;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);