smb2pdu.c

  1. /*
  2. * fs/cifs/smb2pdu.c
  3. *
  4. * Copyright (C) International Business Machines Corp., 2009, 2013
  5. * Etersoft, 2012
  6. * Author(s): Steve French (sfrench@us.ibm.com)
  7. * Pavel Shilovsky (pshilovsky@samba.org) 2012
  8. *
  9. * Contains the routines for constructing the SMB2 PDUs themselves
  10. *
  11. * This library is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU Lesser General Public License as published
  13. * by the Free Software Foundation; either version 2.1 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This library is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  19. * the GNU Lesser General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU Lesser General Public License
  22. * along with this library; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
  26. /* Note that there are handle based routines which must be */
  27. /* treated slightly differently for reconnection purposes since we never */
  28. /* want to reuse a stale file handle and only the caller knows the file info */
  29. #include <linux/fs.h>
  30. #include <linux/kernel.h>
  31. #include <linux/vfs.h>
  32. #include <linux/task_io_accounting_ops.h>
  33. #include <linux/uaccess.h>
  34. #include <linux/uuid.h>
  35. #include <linux/pagemap.h>
  36. #include <linux/xattr.h>
  37. #include "smb2pdu.h"
  38. #include "cifsglob.h"
  39. #include "cifsacl.h"
  40. #include "cifsproto.h"
  41. #include "smb2proto.h"
  42. #include "cifs_unicode.h"
  43. #include "cifs_debug.h"
  44. #include "ntlmssp.h"
  45. #include "smb2status.h"
  46. #include "smb2glob.h"
  47. #include "cifspdu.h"
  48. #include "cifs_spnego.h"
  49. #include "smbdirect.h"
  50. #include "trace.h"
  51. /*
  52. * The following table defines the expected "StructureSize" of SMB2 requests
  53. * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
  54. *
  55. * Note that commands are defined in smb2pdu.h in le16 but the array below is
  56. * indexed by command in host byte order.
  57. */
  58. static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
  59. /* SMB2_NEGOTIATE */ 36,
  60. /* SMB2_SESSION_SETUP */ 25,
  61. /* SMB2_LOGOFF */ 4,
  62. /* SMB2_TREE_CONNECT */ 9,
  63. /* SMB2_TREE_DISCONNECT */ 4,
  64. /* SMB2_CREATE */ 57,
  65. /* SMB2_CLOSE */ 24,
  66. /* SMB2_FLUSH */ 24,
  67. /* SMB2_READ */ 49,
  68. /* SMB2_WRITE */ 49,
  69. /* SMB2_LOCK */ 48,
  70. /* SMB2_IOCTL */ 57,
  71. /* SMB2_CANCEL */ 4,
  72. /* SMB2_ECHO */ 4,
  73. /* SMB2_QUERY_DIRECTORY */ 33,
  74. /* SMB2_CHANGE_NOTIFY */ 32,
  75. /* SMB2_QUERY_INFO */ 41,
  76. /* SMB2_SET_INFO */ 33,
  77. /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
  78. };
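/*
 * Editorial note (not in the original file): fill_small_buf() below indexes
 * this table with the command converted back to host order, e.g.
 * smb2_req_struct_sizes[le16_to_cpu(SMB2_CREATE)] == 57, and copies the
 * value into StructureSize2 of the request being built.
 */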
  79. int smb3_encryption_required(const struct cifs_tcon *tcon)
  80. {
  81. if (!tcon)
  82. return 0;
  83. if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
  84. (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
  85. return 1;
  86. if (tcon->seal &&
  87. (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
  88. return 1;
  89. return 0;
  90. }
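/*
 * Editorial example (not in the original file): encryption is forced either
 * by the server/share (SMB2_SESSION_FLAG_ENCRYPT_DATA on the session or
 * SHI1005_FLAGS_ENCRYPT_DATA on the share) or by the client, when the mount
 * used "seal" and the server advertised SMB2_GLOBAL_CAP_ENCRYPTION.
 */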
  91. static void
  92. smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
  93. const struct cifs_tcon *tcon)
  94. {
  95. shdr->ProtocolId = SMB2_PROTO_NUMBER;
  96. shdr->StructureSize = cpu_to_le16(64);
  97. shdr->Command = smb2_cmd;
  98. if (tcon && tcon->ses && tcon->ses->server) {
  99. struct TCP_Server_Info *server = tcon->ses->server;
  100. spin_lock(&server->req_lock);
  101. /* Request up to 2 credits but don't go over the limit. */
  102. if (server->credits >= server->max_credits)
  103. shdr->CreditRequest = cpu_to_le16(0);
  104. else
  105. shdr->CreditRequest = cpu_to_le16(
  106. min_t(int, server->max_credits -
  107. server->credits, 2));
  108. spin_unlock(&server->req_lock);
  109. } else {
  110. shdr->CreditRequest = cpu_to_le16(2);
  111. }
  112. shdr->ProcessId = cpu_to_le32((__u16)current->tgid);
  113. if (!tcon)
  114. goto out;
  115. /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
  116. /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
  117. if ((tcon->ses) && (tcon->ses->server) &&
  118. (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
  119. shdr->CreditCharge = cpu_to_le16(1);
  120. /* else CreditCharge MBZ */
  121. shdr->TreeId = tcon->tid;
  122. /* Uid is not converted */
  123. if (tcon->ses)
  124. shdr->SessionId = tcon->ses->Suid;
  125. /*
  126. * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
  127. * to pass the path on the Open SMB prefixed by \\server\share.
  128. * Not sure when we would need to do the augmented path (if ever) and
  129. * setting this flag breaks the SMB2 open operation since it is
  130. * illegal to send an empty path name (without \\server\share prefix)
  131. * when the DFS flag is set in the SMB open header. We could
  132. * consider setting the flag on all operations other than open
  133. * but it is safer to not set it for now.
  134. */
  135. /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
  136. shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
  137. if (tcon->ses && tcon->ses->server && tcon->ses->server->sign &&
  138. !smb3_encryption_required(tcon))
  139. shdr->Flags |= SMB2_FLAGS_SIGNED;
  140. out:
  141. return;
  142. }
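/*
 * Worked example (editorial): with max_credits == 512 and credits == 510
 * the header requests min_t(int, 512 - 510, 2) == 2 credits; at 511 it
 * requests 1, and once the cap is reached it requests 0, so the client
 * never asks the server to exceed its own credit limit.
 */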
  143. static int
  144. smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
  145. {
  146. int rc;
  147. struct nls_table *nls_codepage;
  148. struct cifs_ses *ses;
  149. struct TCP_Server_Info *server;
  150. /*
  151. * SMB2 NegProt, SessSetup and Logoff do not have a tcon yet, so the
  152. * TCP and SMB session status checks are done differently
  153. * for those three - in the calling routine.
  154. */
  155. if (tcon == NULL)
  156. return 0;
  157. if (smb2_command == SMB2_TREE_CONNECT)
  158. return 0;
  159. if (tcon->tidStatus == CifsExiting) {
  160. /*
  161. * only tree disconnect, open, and write,
  162. * (and ulogoff which does not have tcon)
  163. * are allowed as we start force umount.
  164. */
  165. if ((smb2_command != SMB2_WRITE) &&
  166. (smb2_command != SMB2_CREATE) &&
  167. (smb2_command != SMB2_TREE_DISCONNECT)) {
  168. cifs_dbg(FYI, "can not send cmd %d while umounting\n",
  169. smb2_command);
  170. return -ENODEV;
  171. }
  172. }
  173. if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
  174. (!tcon->ses->server))
  175. return -EIO;
  176. ses = tcon->ses;
  177. server = ses->server;
  178. /*
  179. * Give demultiplex thread up to 10 seconds to reconnect, should be
  180. * greater than cifs socket timeout which is 7 seconds
  181. */
  182. while (server->tcpStatus == CifsNeedReconnect) {
  183. /*
  184. * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
  185. * here since they are implicitly done when session drops.
  186. */
  187. switch (smb2_command) {
  188. /*
  189. * BB Should we keep oplock break and add flush to exceptions?
  190. */
  191. case SMB2_TREE_DISCONNECT:
  192. case SMB2_CANCEL:
  193. case SMB2_CLOSE:
  194. case SMB2_OPLOCK_BREAK:
  195. return -EAGAIN;
  196. }
  197. rc = wait_event_interruptible_timeout(server->response_q,
  198. (server->tcpStatus != CifsNeedReconnect),
  199. 10 * HZ);
  200. if (rc < 0) {
  201. cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
  202. " received by the process\n", __func__);
  203. return -ERESTARTSYS;
  204. }
  205. /* are we still trying to reconnect? */
  206. if (server->tcpStatus != CifsNeedReconnect)
  207. break;
  208. /*
  209. * on "soft" mounts we wait once. Hard mounts keep
  210. * retrying until process is killed or server comes
  211. * back on-line
  212. */
  213. if (!tcon->retry) {
  214. cifs_dbg(FYI, "gave up waiting on reconnect in smb2_reconnect\n");
  215. return -EHOSTDOWN;
  216. }
  217. }
  218. if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
  219. return 0;
  220. nls_codepage = load_nls_default();
  221. /*
  222. * need to prevent multiple threads trying to simultaneously reconnect
  223. * the same SMB session
  224. */
  225. mutex_lock(&tcon->ses->session_mutex);
  226. /*
  227. * Recheck after acquire mutex. If another thread is negotiating
  228. * and the server never sends an answer the socket will be closed
  229. * and tcpStatus set to reconnect.
  230. */
  231. if (server->tcpStatus == CifsNeedReconnect) {
  232. rc = -EHOSTDOWN;
  233. mutex_unlock(&tcon->ses->session_mutex);
  234. goto out;
  235. }
  236. rc = cifs_negotiate_protocol(0, tcon->ses);
  237. if (!rc && tcon->ses->need_reconnect) {
  238. rc = cifs_setup_session(0, tcon->ses, nls_codepage);
  239. if ((rc == -EACCES) && !tcon->retry) {
  240. rc = -EHOSTDOWN;
  241. mutex_unlock(&tcon->ses->session_mutex);
  242. goto failed;
  243. }
  244. }
  245. if (rc || !tcon->need_reconnect) {
  246. mutex_unlock(&tcon->ses->session_mutex);
  247. goto out;
  248. }
  249. cifs_mark_open_files_invalid(tcon);
  250. if (tcon->use_persistent)
  251. tcon->need_reopen_files = true;
  252. rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
  253. mutex_unlock(&tcon->ses->session_mutex);
  254. cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
  255. if (rc) {
  256. /* If sess reconnected but tcon didn't, something strange ... */
  257. printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
  258. goto out;
  259. }
  260. if (smb2_command != SMB2_INTERNAL_CMD)
  261. queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
  262. atomic_inc(&tconInfoReconnectCount);
  263. out:
  264. /*
  265. * Check if handle based operation so we know whether we can continue
  266. * or not without returning to caller to reset file handle.
  267. */
  268. /*
  269. * BB Is flush done by server on drop of tcp session? Should we special
  270. * case it and skip above?
  271. */
  272. switch (smb2_command) {
  273. case SMB2_FLUSH:
  274. case SMB2_READ:
  275. case SMB2_WRITE:
  276. case SMB2_LOCK:
  277. case SMB2_IOCTL:
  278. case SMB2_QUERY_DIRECTORY:
  279. case SMB2_CHANGE_NOTIFY:
  280. case SMB2_QUERY_INFO:
  281. case SMB2_SET_INFO:
  282. rc = -EAGAIN;
  283. }
  284. failed:
  285. unload_nls(nls_codepage);
  286. return rc;
  287. }
  288. static void
  289. fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
  290. unsigned int *total_len)
  291. {
  292. struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
  293. /* lookup word count ie StructureSize from table */
  294. __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
  295. /*
  296. * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
  297. * largest operations (Create)
  298. */
  299. memset(buf, 0, 256);
  300. smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon);
  301. spdu->StructureSize2 = cpu_to_le16(parmsize);
  302. *total_len = parmsize + sizeof(struct smb2_sync_hdr);
  303. }
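/*
 * Worked example (editorial, assuming the 64-byte sync header set up in
 * smb2_hdr_assemble): for SMB2_FLUSH the table gives parmsize == 24, so
 * *total_len becomes 24 + 64 == 88 bytes of fixed-length request; any
 * variable-length data is appended by the caller via additional iovecs.
 */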
  304. /*
  305. * Allocate and return pointer to an SMB request hdr, and set basic
  306. * SMB information in the SMB header. If the return code is zero, this
  307. * function must have filled in request_buf pointer.
  308. */
  309. static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
  310. void **request_buf, unsigned int *total_len)
  311. {
  312. /* BB eventually switch this to SMB2 specific small buf size */
  313. if (smb2_command == SMB2_SET_INFO)
  314. *request_buf = cifs_buf_get();
  315. else
  316. *request_buf = cifs_small_buf_get();
  317. if (*request_buf == NULL) {
  318. /* BB should we add a retry in here if not a writepage? */
  319. return -ENOMEM;
  320. }
  321. fill_small_buf(smb2_command, tcon,
  322. (struct smb2_sync_hdr *)(*request_buf),
  323. total_len);
  324. if (tcon != NULL) {
  325. uint16_t com_code = le16_to_cpu(smb2_command);
  326. cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
  327. cifs_stats_inc(&tcon->num_smbs_sent);
  328. }
  329. return 0;
  330. }
  331. static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
  332. void **request_buf, unsigned int *total_len)
  333. {
  334. int rc;
  335. rc = smb2_reconnect(smb2_command, tcon);
  336. if (rc)
  337. return rc;
  338. return __smb2_plain_req_init(smb2_command, tcon, request_buf,
  339. total_len);
  340. }
  341. static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
  342. void **request_buf, unsigned int *total_len)
  343. {
  344. /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
  345. if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
  346. return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
  347. total_len);
  348. }
  349. return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
  350. }
  351. /* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */
  352. #define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */
  353. #define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
  354. #define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
  355. #define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
  356. static void
  357. build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
  358. {
  359. pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
  360. pneg_ctxt->DataLength = cpu_to_le16(38);
  361. pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
  362. pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
  363. get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
  364. pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
  365. }
  366. static void
  367. build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
  368. {
  369. pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
  370. pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
  371. pneg_ctxt->CipherCount = cpu_to_le16(1);
  372. /* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
  373. pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
  374. }
  375. static void
  376. build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
  377. {
  378. pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
  379. pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
  380. }
  381. static void
  382. assemble_neg_contexts(struct smb2_negotiate_req *req,
  383. unsigned int *total_len)
  384. {
  385. char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT;
  386. unsigned int ctxt_len;
  387. *total_len += 2; /* Add 2 due to round to 8 byte boundary for 1st ctxt */
  388. build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
  389. ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
  390. *total_len += ctxt_len;
  391. pneg_ctxt += ctxt_len;
  392. build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
  393. ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
  394. *total_len += ctxt_len;
  395. pneg_ctxt += ctxt_len;
  396. build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
  397. *total_len += sizeof(struct smb2_posix_neg_context);
  398. req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
  399. req->NegotiateContextCount = cpu_to_le16(3);
  400. }
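/*
 * Layout sketch (editorial; exact context sizes are implementation
 * details): contexts begin at OFFSET_OF_NEG_CONTEXT (0x68) and each one is
 * rounded up to an 8-byte boundary before the next is written, e.g. a
 * 42-byte preauth context consumes DIV_ROUND_UP(42, 8) * 8 == 48 bytes, so
 * the encryption context starts at 0x68 + 48 and the POSIX context follows
 * after the rounded-up encryption context length.
 */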
  401. static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
  402. {
  403. unsigned int len = le16_to_cpu(ctxt->DataLength);
  404. /* If invalid preauth context warn but use what we requested, SHA-512 */
  405. if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
  406. printk_once(KERN_WARNING "server sent bad preauth context\n");
  407. return;
  408. } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
  409. pr_warn_once("server sent invalid SaltLength\n");
  410. return;
  411. }
  412. if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
  413. printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
  414. if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
  415. printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n");
  416. }
  417. static int decode_encrypt_ctx(struct TCP_Server_Info *server,
  418. struct smb2_encryption_neg_context *ctxt)
  419. {
  420. unsigned int len = le16_to_cpu(ctxt->DataLength);
  421. cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
  422. if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
  423. printk_once(KERN_WARNING "server sent bad crypto ctxt len\n");
  424. return -EINVAL;
  425. }
  426. if (le16_to_cpu(ctxt->CipherCount) != 1) {
  427. printk_once(KERN_WARNING "illegal SMB3.11 cipher count\n");
  428. return -EINVAL;
  429. }
  430. cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
  431. if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
  432. (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) {
  433. printk_once(KERN_WARNING "invalid SMB3.11 cipher returned\n");
  434. return -EINVAL;
  435. }
  436. server->cipher_type = ctxt->Ciphers[0];
  437. server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
  438. return 0;
  439. }
  440. static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
  441. struct TCP_Server_Info *server,
  442. unsigned int len_of_smb)
  443. {
  444. struct smb2_neg_context *pctx;
  445. unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
  446. unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
  447. unsigned int len_of_ctxts, i;
  448. int rc = 0;
  449. cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
  450. if (len_of_smb <= offset) {
  451. cifs_dbg(VFS, "Invalid response: negotiate context offset\n");
  452. return -EINVAL;
  453. }
  454. len_of_ctxts = len_of_smb - offset;
  455. for (i = 0; i < ctxt_cnt; i++) {
  456. int clen;
  457. /* check that offset is not beyond end of SMB */
  458. if (len_of_ctxts == 0)
  459. break;
  460. if (len_of_ctxts < sizeof(struct smb2_neg_context))
  461. break;
  462. pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
  463. clen = le16_to_cpu(pctx->DataLength);
  464. if (clen > len_of_ctxts)
  465. break;
  466. if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
  467. decode_preauth_context(
  468. (struct smb2_preauth_neg_context *)pctx);
  469. else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
  470. rc = decode_encrypt_ctx(server,
  471. (struct smb2_encryption_neg_context *)pctx);
  472. else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
  473. server->posix_ext_supported = true;
  474. else
  475. cifs_dbg(VFS, "unknown negcontext of type %d ignored\n",
  476. le16_to_cpu(pctx->ContextType));
  477. if (rc)
  478. break;
  479. /* offsets must be 8 byte aligned */
  480. clen = (clen + 7) & ~0x7;
  481. offset += clen + sizeof(struct smb2_neg_context);
  482. len_of_ctxts -= clen;
  483. }
  484. return rc;
  485. }
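/*
 * Editorial note on the loop above: (clen + 7) & ~0x7 rounds the advertised
 * DataLength up to the next multiple of 8, so the cursor always advances by
 * an 8-byte aligned data length plus the fixed context header, matching the
 * alignment MS-SMB2 requires between negotiate contexts.
 */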
  486. static struct create_posix *
  487. create_posix_buf(umode_t mode)
  488. {
  489. struct create_posix *buf;
  490. buf = kzalloc(sizeof(struct create_posix),
  491. GFP_KERNEL);
  492. if (!buf)
  493. return NULL;
  494. buf->ccontext.DataOffset =
  495. cpu_to_le16(offsetof(struct create_posix, Mode));
  496. buf->ccontext.DataLength = cpu_to_le32(4);
  497. buf->ccontext.NameOffset =
  498. cpu_to_le16(offsetof(struct create_posix, Name));
  499. buf->ccontext.NameLength = cpu_to_le16(16);
  500. /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
  501. buf->Name[0] = 0x93;
  502. buf->Name[1] = 0xAD;
  503. buf->Name[2] = 0x25;
  504. buf->Name[3] = 0x50;
  505. buf->Name[4] = 0x9C;
  506. buf->Name[5] = 0xB4;
  507. buf->Name[6] = 0x11;
  508. buf->Name[7] = 0xE7;
  509. buf->Name[8] = 0xB4;
  510. buf->Name[9] = 0x23;
  511. buf->Name[10] = 0x83;
  512. buf->Name[11] = 0xDE;
  513. buf->Name[12] = 0x96;
  514. buf->Name[13] = 0x8B;
  515. buf->Name[14] = 0xCD;
  516. buf->Name[15] = 0x7C;
  517. buf->Mode = cpu_to_le32(mode);
  518. cifs_dbg(FYI, "mode on posix create 0%o", mode);
  519. return buf;
  520. }
  521. static int
  522. add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
  523. {
  524. struct smb2_create_req *req = iov[0].iov_base;
  525. unsigned int num = *num_iovec;
  526. iov[num].iov_base = create_posix_buf(mode);
  527. if (iov[num].iov_base == NULL)
  528. return -ENOMEM;
  529. iov[num].iov_len = sizeof(struct create_posix);
  530. if (!req->CreateContextsOffset)
  531. req->CreateContextsOffset = cpu_to_le32(
  532. sizeof(struct smb2_create_req) +
  533. iov[num - 1].iov_len);
  534. le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
  535. *num_iovec = num + 1;
  536. return 0;
  537. }
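/*
 * Editorial note: for the first create context, CreateContextsOffset is set
 * to point just past the fixed create request plus the preceding iovec
 * (typically the path name); later contexts only grow CreateContextsLength,
 * since they are appended back to back in subsequent iovecs.
 */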
  538. /*
  539. *
  540. * SMB2 Worker functions follow:
  541. *
  542. * The general structure of the worker functions is:
  543. * 1) Call smb2_init (assembles SMB2 header)
  544. * 2) Initialize SMB2 command specific fields in fixed length area of SMB
  545. * 3) Call cifs_send_recv (sends request on socket and waits for response)
  546. * 4) Decode SMB2 command specific fields in the fixed length area
  547. * 5) Decode variable length data area (if any for this SMB2 command type)
  548. * 6) Call free smb buffer
  549. * 7) return
  550. *
  551. */
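/*
 * Minimal sketch of the recipe above (editorial illustration only, not part
 * of the original file; real workers such as SMB2_negotiate() below add
 * command-specific fields, credit handling, stats and tracing):
 */
#if 0	/* illustrative only */
static int smb2_sketch_worker(const unsigned int xid, struct cifs_tcon *tcon)
{
	struct smb_rqst rqst;
	struct kvec iov[1];
	struct kvec rsp_iov;
	unsigned int total_len;
	void *req;
	int resp_buftype;
	int rc;

	/* 1) assemble the header and fixed-length body */
	rc = smb2_plain_req_init(SMB2_ECHO, tcon, &req, &total_len);
	if (rc)
		return rc;

	/* 2) command-specific fixed fields would be filled in here */

	/* 3) send the request and wait for the response */
	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;
	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;
	rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buftype, 0, &rsp_iov);
	cifs_small_buf_release(req);

	/* 4) and 5) decode fixed and variable-length response areas here */

	/* 6) free the response buffer, then 7) return */
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}
#endif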
  552. int
  553. SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
  554. {
  555. struct smb_rqst rqst;
  556. struct smb2_negotiate_req *req;
  557. struct smb2_negotiate_rsp *rsp;
  558. struct kvec iov[1];
  559. struct kvec rsp_iov;
  560. int rc = 0;
  561. int resp_buftype;
  562. struct TCP_Server_Info *server = ses->server;
  563. int blob_offset, blob_length;
  564. char *security_blob;
  565. int flags = CIFS_NEG_OP;
  566. unsigned int total_len;
  567. cifs_dbg(FYI, "Negotiate protocol\n");
  568. if (!server) {
  569. WARN(1, "%s: server is NULL!\n", __func__);
  570. return -EIO;
  571. }
  572. rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
  573. if (rc)
  574. return rc;
  575. req->sync_hdr.SessionId = 0;
  576. memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
  577. memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
  578. if (strcmp(ses->server->vals->version_string,
  579. SMB3ANY_VERSION_STRING) == 0) {
  580. req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
  581. req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
  582. req->DialectCount = cpu_to_le16(2);
  583. total_len += 4;
  584. } else if (strcmp(ses->server->vals->version_string,
  585. SMBDEFAULT_VERSION_STRING) == 0) {
  586. req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
  587. req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
  588. req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
  589. req->DialectCount = cpu_to_le16(3);
  590. total_len += 6;
  591. } else {
  592. /* otherwise send specific dialect */
  593. req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
  594. req->DialectCount = cpu_to_le16(1);
  595. total_len += 2;
  596. }
  597. /* only one of SMB2 signing flags may be set in SMB2 request */
  598. if (ses->sign)
  599. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  600. else if (global_secflags & CIFSSEC_MAY_SIGN)
  601. req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  602. else
  603. req->SecurityMode = 0;
  604. req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
  605. /* ClientGUID must be zero for SMB2.02 dialect */
  606. if (ses->server->vals->protocol_id == SMB20_PROT_ID)
  607. memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
  608. else {
  609. memcpy(req->ClientGUID, server->client_guid,
  610. SMB2_CLIENT_GUID_SIZE);
  611. if (ses->server->vals->protocol_id == SMB311_PROT_ID)
  612. assemble_neg_contexts(req, &total_len);
  613. }
  614. iov[0].iov_base = (char *)req;
  615. iov[0].iov_len = total_len;
  616. memset(&rqst, 0, sizeof(struct smb_rqst));
  617. rqst.rq_iov = iov;
  618. rqst.rq_nvec = 1;
  619. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
  620. cifs_small_buf_release(req);
  621. rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
  622. /*
  623. * No tcon so can't do
  624. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  625. */
  626. if (rc == -EOPNOTSUPP) {
  627. cifs_dbg(VFS, "Dialect not supported by server. Consider "
  628. "specifying vers=1.0 or vers=2.0 on mount for accessing"
  629. " older servers\n");
  630. goto neg_exit;
  631. } else if (rc != 0)
  632. goto neg_exit;
  633. if (strcmp(ses->server->vals->version_string,
  634. SMB3ANY_VERSION_STRING) == 0) {
  635. if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
  636. cifs_dbg(VFS,
  637. "SMB2 dialect returned but not requested\n");
  638. return -EIO;
  639. } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
  640. cifs_dbg(VFS,
  641. "SMB2.1 dialect returned but not requested\n");
  642. return -EIO;
  643. }
  644. } else if (strcmp(ses->server->vals->version_string,
  645. SMBDEFAULT_VERSION_STRING) == 0) {
  646. if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
  647. cifs_dbg(VFS,
  648. "SMB2 dialect returned but not requested\n");
  649. return -EIO;
  650. } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
  651. /* ops and vals default to SMB3.0, so update them for the SMB2.1 dialect */
  652. ses->server->ops = &smb21_operations;
  653. ses->server->vals = &smb21_values;
  654. }
  655. } else if (le16_to_cpu(rsp->DialectRevision) !=
  656. ses->server->vals->protocol_id) {
  657. /* if a single dialect was requested, ensure the returned dialect matches */
  658. cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
  659. le16_to_cpu(rsp->DialectRevision));
  660. return -EIO;
  661. }
  662. cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
  663. if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
  664. cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
  665. else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
  666. cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
  667. else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
  668. cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
  669. else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
  670. cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
  671. else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
  672. cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
  673. else {
  674. cifs_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
  675. le16_to_cpu(rsp->DialectRevision));
  676. rc = -EIO;
  677. goto neg_exit;
  678. }
  679. server->dialect = le16_to_cpu(rsp->DialectRevision);
  680. /*
  681. * Keep a copy of the hash after negprot. This hash will be
  682. * the starting hash value for all sessions made from this
  683. * server.
  684. */
  685. memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
  686. SMB2_PREAUTH_HASH_SIZE);
  687. /* SMB2 only has an extended negflavor */
  688. server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
  689. /* set it to the maximum buffer size value we can send with 1 credit */
  690. server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
  691. SMB2_MAX_BUFFER_SIZE);
  692. server->max_read = le32_to_cpu(rsp->MaxReadSize);
  693. server->max_write = le32_to_cpu(rsp->MaxWriteSize);
  694. server->sec_mode = le16_to_cpu(rsp->SecurityMode);
  695. if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
  696. cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
  697. server->sec_mode);
  698. server->capabilities = le32_to_cpu(rsp->Capabilities);
  699. /* Internal types */
  700. server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
  701. security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
  702. (struct smb2_sync_hdr *)rsp);
  703. /*
  704. * See MS-SMB2 section 2.2.4: if no blob, client picks default which
  705. * for us will be
  706. * ses->sectype = RawNTLMSSP;
  707. * but for the time being this is our only auth choice so it doesn't matter.
  708. * We just found a server which sets blob length to zero expecting raw.
  709. */
  710. if (blob_length == 0) {
  711. cifs_dbg(FYI, "missing security blob on negprot\n");
  712. server->sec_ntlmssp = true;
  713. }
  714. rc = cifs_enable_signing(server, ses->sign);
  715. if (rc)
  716. goto neg_exit;
  717. if (blob_length) {
  718. rc = decode_negTokenInit(security_blob, blob_length, server);
  719. if (rc == 1)
  720. rc = 0;
  721. else if (rc == 0)
  722. rc = -EIO;
  723. }
  724. if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
  725. if (rsp->NegotiateContextCount)
  726. rc = smb311_decode_neg_context(rsp, server,
  727. rsp_iov.iov_len);
  728. else
  729. cifs_dbg(VFS, "Missing expected negotiate contexts\n");
  730. }
  731. neg_exit:
  732. free_rsp_buf(resp_buftype, rsp);
  733. return rc;
  734. }
  735. int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
  736. {
  737. int rc;
  738. struct validate_negotiate_info_req *pneg_inbuf;
  739. struct validate_negotiate_info_rsp *pneg_rsp = NULL;
  740. u32 rsplen;
  741. u32 inbuflen; /* max of 4 dialects */
  742. cifs_dbg(FYI, "validate negotiate\n");
  743. /* In SMB3.11 preauth integrity supersedes validate negotiate */
  744. if (tcon->ses->server->dialect == SMB311_PROT_ID)
  745. return 0;
  746. /*
  747. * validation ioctl must be signed, so no point sending this if we
  748. * can not sign it (ie are not known user). Even if signing is not
  749. * required (enabled but not negotiated), in those cases we selectively
  750. * sign just this, the first and only signed request on a connection.
  751. * Having validation of negotiate info helps reduce attack vectors.
  752. */
  753. if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
  754. return 0; /* validation requires signing */
  755. if (tcon->ses->user_name == NULL) {
  756. cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
  757. return 0; /* validation requires signing */
  758. }
  759. if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
  760. cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
  761. pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
  762. if (!pneg_inbuf)
  763. return -ENOMEM;
  764. pneg_inbuf->Capabilities =
  765. cpu_to_le32(tcon->ses->server->vals->req_capabilities);
  766. memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid,
  767. SMB2_CLIENT_GUID_SIZE);
  768. if (tcon->ses->sign)
  769. pneg_inbuf->SecurityMode =
  770. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
  771. else if (global_secflags & CIFSSEC_MAY_SIGN)
  772. pneg_inbuf->SecurityMode =
  773. cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
  774. else
  775. pneg_inbuf->SecurityMode = 0;
  776. if (strcmp(tcon->ses->server->vals->version_string,
  777. SMB3ANY_VERSION_STRING) == 0) {
  778. pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
  779. pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
  780. pneg_inbuf->DialectCount = cpu_to_le16(2);
  781. /* structure is big enough for 3 dialects, sending only 2 */
  782. inbuflen = sizeof(*pneg_inbuf) -
  783. sizeof(pneg_inbuf->Dialects[0]);
  784. } else if (strcmp(tcon->ses->server->vals->version_string,
  785. SMBDEFAULT_VERSION_STRING) == 0) {
  786. pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
  787. pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
  788. pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
  789. pneg_inbuf->DialectCount = cpu_to_le16(3);
  790. /* structure is big enough for 3 dialects */
  791. inbuflen = sizeof(*pneg_inbuf);
  792. } else {
  793. /* otherwise specific dialect was requested */
  794. pneg_inbuf->Dialects[0] =
  795. cpu_to_le16(tcon->ses->server->vals->protocol_id);
  796. pneg_inbuf->DialectCount = cpu_to_le16(1);
  797. /* structure is big enough for 3 dialects, sending only 1 */
  798. inbuflen = sizeof(*pneg_inbuf) -
  799. sizeof(pneg_inbuf->Dialects[0]) * 2;
  800. }
  801. rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
  802. FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
  803. (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
  804. if (rc == -EOPNOTSUPP) {
  805. /*
  806. * Old Windows versions or Netapp SMB server can return
  807. * not supported error. Client should accept it.
  808. */
  809. cifs_dbg(VFS, "Server does not support validate negotiate\n");
  810. rc = 0;
  811. goto out_free_inbuf;
  812. } else if (rc != 0) {
  813. cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
  814. rc = -EIO;
  815. goto out_free_inbuf;
  816. }
  817. rc = -EIO;
  818. if (rsplen != sizeof(*pneg_rsp)) {
  819. cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
  820. rsplen);
  821. /* relax check since Mac returns max bufsize allowed on ioctl */
  822. if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
  823. goto out_free_rsp;
  824. }
  825. /* check validate negotiate info response matches what we got earlier */
  826. if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
  827. goto vneg_out;
  828. if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
  829. goto vneg_out;
  830. /* do not validate server guid because not saved at negprot time yet */
  831. if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
  832. SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
  833. goto vneg_out;
  834. /* validate negotiate successful */
  835. rc = 0;
  836. cifs_dbg(FYI, "validate negotiate info successful\n");
  837. goto out_free_rsp;
  838. vneg_out:
  839. cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
  840. out_free_rsp:
  841. kfree(pneg_rsp);
  842. out_free_inbuf:
  843. kfree(pneg_inbuf);
  844. return rc;
  845. }
  846. enum securityEnum
  847. smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
  848. {
  849. switch (requested) {
  850. case Kerberos:
  851. case RawNTLMSSP:
  852. return requested;
  853. case NTLMv2:
  854. return RawNTLMSSP;
  855. case Unspecified:
  856. if (server->sec_ntlmssp &&
  857. (global_secflags & CIFSSEC_MAY_NTLMSSP))
  858. return RawNTLMSSP;
  859. if ((server->sec_kerberos || server->sec_mskerberos) &&
  860. (global_secflags & CIFSSEC_MAY_KRB5))
  861. return Kerberos;
  862. /* Fallthrough */
  863. default:
  864. return Unspecified;
  865. }
  866. }
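/*
 * Editorial example: with sec= left Unspecified, RawNTLMSSP wins whenever
 * the server offered NTLMSSP and CIFSSEC_MAY_NTLMSSP is allowed; Kerberos
 * is only chosen when NTLMSSP is unavailable (or disallowed) and the server
 * advertised krb5/mskrb5 with CIFSSEC_MAY_KRB5 permitted; otherwise the
 * function falls through and returns Unspecified.
 */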
  867. struct SMB2_sess_data {
  868. unsigned int xid;
  869. struct cifs_ses *ses;
  870. struct nls_table *nls_cp;
  871. void (*func)(struct SMB2_sess_data *);
  872. int result;
  873. u64 previous_session;
  874. /* we will send the SMB in three pieces:
  875. * a fixed length beginning part, an optional
  876. * SPNEGO blob (which can be zero length), and a
  877. * last part which will include the strings
  878. * and rest of bcc area. This allows us to avoid
  879. * a large buffer 17K allocation
  880. */
  881. int buf0_type;
  882. struct kvec iov[2];
  883. };
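/*
 * Editorial note: in practice the "three pieces" collapse into two kvecs
 * here: iov[0] holds the fixed-length session setup request built by
 * SMB2_sess_alloc_buffer() (total_len - 1, dropping the trailing pad byte),
 * and iov[1] holds the variable-length security blob whose offset/length
 * are patched into the request by SMB2_sess_sendreceive().
 */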
  884. static int
  885. SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
  886. {
  887. int rc;
  888. struct cifs_ses *ses = sess_data->ses;
  889. struct smb2_sess_setup_req *req;
  890. struct TCP_Server_Info *server = ses->server;
  891. unsigned int total_len;
  892. rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
  893. &total_len);
  894. if (rc)
  895. return rc;
  896. /* First session, not a reauthenticate */
  897. req->sync_hdr.SessionId = 0;
  898. /* if reconnect, we need to send previous sess id, otherwise it is 0 */
  899. req->PreviousSessionId = sess_data->previous_session;
  900. req->Flags = 0; /* MBZ */
  901. /* enough to enable echos and oplocks and one max size write */
  902. req->sync_hdr.CreditRequest = cpu_to_le16(130);
  903. /* only one of SMB2 signing flags may be set in SMB2 request */
  904. if (server->sign)
  905. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
  906. else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
  907. req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
  908. else
  909. req->SecurityMode = 0;
  910. #ifdef CONFIG_CIFS_DFS_UPCALL
  911. req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
  912. #else
  913. req->Capabilities = 0;
  914. #endif /* DFS_UPCALL */
  915. req->Channel = 0; /* MBZ */
  916. sess_data->iov[0].iov_base = (char *)req;
  917. /* 1 for pad */
  918. sess_data->iov[0].iov_len = total_len - 1;
  919. /*
  920. * This variable will be used to clear the buffer
  921. * allocated above in case of any error in the calling function.
  922. */
  923. sess_data->buf0_type = CIFS_SMALL_BUFFER;
  924. return 0;
  925. }
  926. static void
  927. SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
  928. {
  929. free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
  930. sess_data->buf0_type = CIFS_NO_BUFFER;
  931. }
  932. static int
  933. SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
  934. {
  935. int rc;
  936. struct smb_rqst rqst;
  937. struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
  938. struct kvec rsp_iov = { NULL, 0 };
  939. /* Testing shows that buffer offset must be at location of Buffer[0] */
  940. req->SecurityBufferOffset =
  941. cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
  942. req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
  943. memset(&rqst, 0, sizeof(struct smb_rqst));
  944. rqst.rq_iov = sess_data->iov;
  945. rqst.rq_nvec = 2;
  946. /* BB add code to build os and lm fields */
  947. rc = cifs_send_recv(sess_data->xid, sess_data->ses,
  948. &rqst,
  949. &sess_data->buf0_type,
  950. CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
  951. cifs_small_buf_release(sess_data->iov[0].iov_base);
  952. memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
  953. return rc;
  954. }
  955. static int
  956. SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
  957. {
  958. int rc = 0;
  959. struct cifs_ses *ses = sess_data->ses;
  960. mutex_lock(&ses->server->srv_mutex);
  961. if (ses->server->ops->generate_signingkey) {
  962. rc = ses->server->ops->generate_signingkey(ses);
  963. if (rc) {
  964. cifs_dbg(FYI,
  965. "SMB3 session key generation failed\n");
  966. mutex_unlock(&ses->server->srv_mutex);
  967. return rc;
  968. }
  969. }
  970. if (!ses->server->session_estab) {
  971. ses->server->sequence_number = 0x2;
  972. ses->server->session_estab = true;
  973. }
  974. mutex_unlock(&ses->server->srv_mutex);
  975. cifs_dbg(FYI, "SMB2/3 session established successfully\n");
  976. spin_lock(&GlobalMid_Lock);
  977. ses->status = CifsGood;
  978. ses->need_reconnect = false;
  979. spin_unlock(&GlobalMid_Lock);
  980. return rc;
  981. }
  982. #ifdef CONFIG_CIFS_UPCALL
  983. static void
  984. SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
  985. {
  986. int rc;
  987. struct cifs_ses *ses = sess_data->ses;
  988. struct cifs_spnego_msg *msg;
  989. struct key *spnego_key = NULL;
  990. struct smb2_sess_setup_rsp *rsp = NULL;
  991. rc = SMB2_sess_alloc_buffer(sess_data);
  992. if (rc)
  993. goto out;
  994. spnego_key = cifs_get_spnego_key(ses);
  995. if (IS_ERR(spnego_key)) {
  996. rc = PTR_ERR(spnego_key);
  997. if (rc == -ENOKEY)
  998. cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
  999. spnego_key = NULL;
  1000. goto out;
  1001. }
  1002. msg = spnego_key->payload.data[0];
  1003. /*
  1004. * check version field to make sure that cifs.upcall is
  1005. * sending us a response in an expected form
  1006. */
  1007. if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
  1008. cifs_dbg(VFS,
  1009. "bad cifs.upcall version. Expected %d got %d",
  1010. CIFS_SPNEGO_UPCALL_VERSION, msg->version);
  1011. rc = -EKEYREJECTED;
  1012. goto out_put_spnego_key;
  1013. }
  1014. ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
  1015. GFP_KERNEL);
  1016. if (!ses->auth_key.response) {
  1017. cifs_dbg(VFS,
  1018. "Kerberos can't allocate (%u bytes) memory",
  1019. msg->sesskey_len);
  1020. rc = -ENOMEM;
  1021. goto out_put_spnego_key;
  1022. }
  1023. ses->auth_key.len = msg->sesskey_len;
  1024. sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
  1025. sess_data->iov[1].iov_len = msg->secblob_len;
  1026. rc = SMB2_sess_sendreceive(sess_data);
  1027. if (rc)
  1028. goto out_put_spnego_key;
  1029. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  1030. ses->Suid = rsp->sync_hdr.SessionId;
  1031. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  1032. rc = SMB2_sess_establish_session(sess_data);
  1033. out_put_spnego_key:
  1034. key_invalidate(spnego_key);
  1035. key_put(spnego_key);
  1036. out:
  1037. sess_data->result = rc;
  1038. sess_data->func = NULL;
  1039. SMB2_sess_free_buffer(sess_data);
  1040. }
  1041. #else
  1042. static void
  1043. SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
  1044. {
  1045. cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
  1046. sess_data->result = -EOPNOTSUPP;
  1047. sess_data->func = NULL;
  1048. }
  1049. #endif
  1050. static void
  1051. SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
  1052. static void
  1053. SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
  1054. {
  1055. int rc;
  1056. struct cifs_ses *ses = sess_data->ses;
  1057. struct smb2_sess_setup_rsp *rsp = NULL;
  1058. char *ntlmssp_blob = NULL;
  1059. bool use_spnego = false; /* else use raw ntlmssp */
  1060. u16 blob_length = 0;
  1061. /*
  1062. * If memory allocation is successful, caller of this function
  1063. * frees it.
  1064. */
  1065. ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
  1066. if (!ses->ntlmssp) {
  1067. rc = -ENOMEM;
  1068. goto out_err;
  1069. }
  1070. ses->ntlmssp->sesskey_per_smbsess = true;
  1071. rc = SMB2_sess_alloc_buffer(sess_data);
  1072. if (rc)
  1073. goto out_err;
  1074. ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
  1075. GFP_KERNEL);
  1076. if (ntlmssp_blob == NULL) {
  1077. rc = -ENOMEM;
  1078. goto out;
  1079. }
  1080. build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
  1081. if (use_spnego) {
  1082. /* BB eventually need to add this */
  1083. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  1084. rc = -EOPNOTSUPP;
  1085. goto out;
  1086. } else {
  1087. blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
  1088. /* with raw NTLMSSP we don't encapsulate in SPNEGO */
  1089. }
  1090. sess_data->iov[1].iov_base = ntlmssp_blob;
  1091. sess_data->iov[1].iov_len = blob_length;
  1092. rc = SMB2_sess_sendreceive(sess_data);
  1093. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  1094. /* If true, rc here is expected and not an error */
  1095. if (sess_data->buf0_type != CIFS_NO_BUFFER &&
  1096. rsp->sync_hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
  1097. rc = 0;
  1098. if (rc)
  1099. goto out;
  1100. if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
  1101. le16_to_cpu(rsp->SecurityBufferOffset)) {
  1102. cifs_dbg(VFS, "Invalid security buffer offset %d\n",
  1103. le16_to_cpu(rsp->SecurityBufferOffset));
  1104. rc = -EIO;
  1105. goto out;
  1106. }
  1107. rc = decode_ntlmssp_challenge(rsp->Buffer,
  1108. le16_to_cpu(rsp->SecurityBufferLength), ses);
  1109. if (rc)
  1110. goto out;
  1111. cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
  1112. ses->Suid = rsp->sync_hdr.SessionId;
  1113. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  1114. out:
  1115. kfree(ntlmssp_blob);
  1116. SMB2_sess_free_buffer(sess_data);
  1117. if (!rc) {
  1118. sess_data->result = 0;
  1119. sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
  1120. return;
  1121. }
  1122. out_err:
  1123. kfree(ses->ntlmssp);
  1124. ses->ntlmssp = NULL;
  1125. sess_data->result = rc;
  1126. sess_data->func = NULL;
  1127. }
  1128. static void
  1129. SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
  1130. {
  1131. int rc;
  1132. struct cifs_ses *ses = sess_data->ses;
  1133. struct smb2_sess_setup_req *req;
  1134. struct smb2_sess_setup_rsp *rsp = NULL;
  1135. unsigned char *ntlmssp_blob = NULL;
  1136. bool use_spnego = false; /* else use raw ntlmssp */
  1137. u16 blob_length = 0;
  1138. rc = SMB2_sess_alloc_buffer(sess_data);
  1139. if (rc)
  1140. goto out;
  1141. req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
  1142. req->sync_hdr.SessionId = ses->Suid;
  1143. rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
  1144. sess_data->nls_cp);
  1145. if (rc) {
  1146. cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
  1147. goto out;
  1148. }
  1149. if (use_spnego) {
  1150. /* BB eventually need to add this */
  1151. cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
  1152. rc = -EOPNOTSUPP;
  1153. goto out;
  1154. }
  1155. sess_data->iov[1].iov_base = ntlmssp_blob;
  1156. sess_data->iov[1].iov_len = blob_length;
  1157. rc = SMB2_sess_sendreceive(sess_data);
  1158. if (rc)
  1159. goto out;
  1160. rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
  1161. ses->Suid = rsp->sync_hdr.SessionId;
  1162. ses->session_flags = le16_to_cpu(rsp->SessionFlags);
  1163. rc = SMB2_sess_establish_session(sess_data);
  1164. out:
  1165. kfree(ntlmssp_blob);
  1166. SMB2_sess_free_buffer(sess_data);
  1167. kfree(ses->ntlmssp);
  1168. ses->ntlmssp = NULL;
  1169. sess_data->result = rc;
  1170. sess_data->func = NULL;
  1171. }
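/*
 * Raw NTLMSSP session setup is thus a two-leg exchange: the negotiate handler
 * above sends a NEGOTIATE_MESSAGE and expects STATUS_MORE_PROCESSING_REQUIRED
 * plus a server challenge, which decode_ntlmssp_challenge() parses; the
 * authenticate handler then reuses the SessionId from that reply and answers
 * with the blob built by build_ntlmssp_auth_blob() before the signing key is
 * derived in SMB2_sess_establish_session().
 */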
static int
SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
{
        int type;

        type = smb2_select_sectype(ses->server, ses->sectype);
        cifs_dbg(FYI, "sess setup type %d\n", type);
        if (type == Unspecified) {
                cifs_dbg(VFS,
                        "Unable to select appropriate authentication method!");
                return -EINVAL;
        }

        switch (type) {
        case Kerberos:
                sess_data->func = SMB2_auth_kerberos;
                break;
        case RawNTLMSSP:
                sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
                break;
        default:
                cifs_dbg(VFS, "secType %d not supported!\n", type);
                return -EOPNOTSUPP;
        }

        return 0;
}

int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
                const struct nls_table *nls_cp)
{
        int rc = 0;
        struct TCP_Server_Info *server = ses->server;
        struct SMB2_sess_data *sess_data;

        cifs_dbg(FYI, "Session Setup\n");

        if (!server) {
                WARN(1, "%s: server is NULL!\n", __func__);
                return -EIO;
        }

        sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
        if (!sess_data)
                return -ENOMEM;

        rc = SMB2_select_sec(ses, sess_data);
        if (rc)
                goto out;
        sess_data->xid = xid;
        sess_data->ses = ses;
        sess_data->buf0_type = CIFS_NO_BUFFER;
        sess_data->nls_cp = (struct nls_table *) nls_cp;
        sess_data->previous_session = ses->Suid;

        /*
         * Initialize the session hash with the server one.
         */
        memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
               SMB2_PREAUTH_HASH_SIZE);

        while (sess_data->func)
                sess_data->func(sess_data);

        if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
                cifs_dbg(VFS, "signing requested but authenticated as guest\n");
        rc = sess_data->result;
out:
        kfree(sess_data);
        return rc;
}
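/*
 * Note on the flow above: SMB2_select_sec() only picks the first handler;
 * each handler then either clears sess_data->func (done, with the outcome in
 * sess_data->result) or points it at the next leg (the raw NTLMSSP negotiate
 * handler chains to the authenticate handler), so the
 * while (sess_data->func) loop simply walks this small state machine until an
 * authentication round trip finishes or fails.
 */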
  1233. int
  1234. SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
  1235. {
  1236. struct smb_rqst rqst;
  1237. struct smb2_logoff_req *req; /* response is also trivial struct */
  1238. int rc = 0;
  1239. struct TCP_Server_Info *server;
  1240. int flags = 0;
  1241. unsigned int total_len;
  1242. struct kvec iov[1];
  1243. struct kvec rsp_iov;
  1244. int resp_buf_type;
  1245. cifs_dbg(FYI, "disconnect session %p\n", ses);
  1246. if (ses && (ses->server))
  1247. server = ses->server;
  1248. else
  1249. return -EIO;
  1250. /* no need to send SMB logoff if uid already closed due to reconnect */
  1251. if (ses->need_reconnect)
  1252. goto smb2_session_already_dead;
  1253. rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
  1254. if (rc)
  1255. return rc;
  1256. /* since no tcon, smb2_init can not do this, so do here */
  1257. req->sync_hdr.SessionId = ses->Suid;
  1258. if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
  1259. flags |= CIFS_TRANSFORM_REQ;
  1260. else if (server->sign)
  1261. req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
  1262. flags |= CIFS_NO_RESP;
  1263. iov[0].iov_base = (char *)req;
  1264. iov[0].iov_len = total_len;
  1265. memset(&rqst, 0, sizeof(struct smb_rqst));
  1266. rqst.rq_iov = iov;
  1267. rqst.rq_nvec = 1;
  1268. rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
  1269. cifs_small_buf_release(req);
  1270. /*
  1271. * No tcon so can't do
  1272. * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
  1273. */
  1274. smb2_session_already_dead:
  1275. return rc;
  1276. }
static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
        cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}

#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)

/* These are similar values to what Windows uses */
static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
{
        tcon->max_chunks = 256;
        tcon->max_bytes_chunk = 1048576;
        tcon->max_bytes_copy = 16777216;
}
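/*
 * In other words, assuming these defaults are left untouched: a server-side
 * copy is issued in chunks of at most 1 MiB (1048576 bytes), with no more
 * than 256 chunks per FSCTL_SRV_COPYCHUNK request and no more than 16 MiB
 * (16777216 bytes) of payload per request overall, which mirrors the limits
 * Windows advertises.
 */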
  1289. int
  1290. SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
  1291. struct cifs_tcon *tcon, const struct nls_table *cp)
  1292. {
  1293. struct smb_rqst rqst;
  1294. struct smb2_tree_connect_req *req;
  1295. struct smb2_tree_connect_rsp *rsp = NULL;
  1296. struct kvec iov[2];
  1297. struct kvec rsp_iov = { NULL, 0 };
  1298. int rc = 0;
  1299. int resp_buftype;
  1300. int unc_path_len;
  1301. __le16 *unc_path = NULL;
  1302. int flags = 0;
  1303. unsigned int total_len;
  1304. cifs_dbg(FYI, "TCON\n");
  1305. if (!(ses->server) || !tree)
  1306. return -EIO;
  1307. unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
  1308. if (unc_path == NULL)
  1309. return -ENOMEM;
  1310. unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
  1311. unc_path_len *= 2;
  1312. if (unc_path_len < 2) {
  1313. kfree(unc_path);
  1314. return -EINVAL;
  1315. }
  1316. /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
  1317. tcon->tid = 0;
  1318. rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
  1319. &total_len);
  1320. if (rc) {
  1321. kfree(unc_path);
  1322. return rc;
  1323. }
  1324. if (smb3_encryption_required(tcon))
  1325. flags |= CIFS_TRANSFORM_REQ;
  1326. iov[0].iov_base = (char *)req;
  1327. /* 1 for pad */
  1328. iov[0].iov_len = total_len - 1;
  1329. /* Testing shows that buffer offset must be at location of Buffer[0] */
  1330. req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
  1331. - 1 /* pad */);
  1332. req->PathLength = cpu_to_le16(unc_path_len - 2);
  1333. iov[1].iov_base = unc_path;
  1334. iov[1].iov_len = unc_path_len;
  1335. /*
  1336. * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
  1337. * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
  1338. * (Samba servers don't always set the flag so also check if null user)
  1339. */
  1340. if ((ses->server->dialect == SMB311_PROT_ID) &&
  1341. !smb3_encryption_required(tcon) &&
  1342. !(ses->session_flags &
  1343. (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
  1344. ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
  1345. req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
  1346. memset(&rqst, 0, sizeof(struct smb_rqst));
  1347. rqst.rq_iov = iov;
  1348. rqst.rq_nvec = 2;
  1349. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
  1350. cifs_small_buf_release(req);
  1351. rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
  1352. if (rc != 0) {
  1353. if (tcon) {
  1354. cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
  1355. tcon->need_reconnect = true;
  1356. }
  1357. goto tcon_error_exit;
  1358. }
  1359. switch (rsp->ShareType) {
  1360. case SMB2_SHARE_TYPE_DISK:
  1361. cifs_dbg(FYI, "connection to disk share\n");
  1362. break;
  1363. case SMB2_SHARE_TYPE_PIPE:
  1364. tcon->pipe = true;
  1365. cifs_dbg(FYI, "connection to pipe share\n");
  1366. break;
  1367. case SMB2_SHARE_TYPE_PRINT:
  1368. tcon->print = true;
  1369. cifs_dbg(FYI, "connection to printer\n");
  1370. break;
  1371. default:
  1372. cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
  1373. rc = -EOPNOTSUPP;
  1374. goto tcon_error_exit;
  1375. }
  1376. tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
  1377. tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
  1378. tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
  1379. tcon->tidStatus = CifsGood;
  1380. tcon->need_reconnect = false;
  1381. tcon->tid = rsp->sync_hdr.TreeId;
  1382. strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
  1383. if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
  1384. ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
  1385. cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
  1386. if (tcon->seal &&
  1387. !(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
  1388. cifs_dbg(VFS, "Encryption is requested but not supported\n");
  1389. init_copy_chunk_defaults(tcon);
  1390. if (tcon->ses->server->ops->validate_negotiate)
  1391. rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
  1392. tcon_exit:
  1393. free_rsp_buf(resp_buftype, rsp);
  1394. kfree(unc_path);
  1395. return rc;
  1396. tcon_error_exit:
  1397. if (rsp && rsp->sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
  1398. cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
  1399. }
  1400. goto tcon_exit;
  1401. }
  1402. int
  1403. SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
  1404. {
  1405. struct smb_rqst rqst;
  1406. struct smb2_tree_disconnect_req *req; /* response is trivial */
  1407. int rc = 0;
  1408. struct cifs_ses *ses = tcon->ses;
  1409. int flags = 0;
  1410. unsigned int total_len;
  1411. struct kvec iov[1];
  1412. struct kvec rsp_iov;
  1413. int resp_buf_type;
  1414. cifs_dbg(FYI, "Tree Disconnect\n");
  1415. if (!ses || !(ses->server))
  1416. return -EIO;
  1417. if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
  1418. return 0;
  1419. rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
  1420. &total_len);
  1421. if (rc)
  1422. return rc;
  1423. if (smb3_encryption_required(tcon))
  1424. flags |= CIFS_TRANSFORM_REQ;
  1425. flags |= CIFS_NO_RESP;
  1426. iov[0].iov_base = (char *)req;
  1427. iov[0].iov_len = total_len;
  1428. memset(&rqst, 0, sizeof(struct smb_rqst));
  1429. rqst.rq_iov = iov;
  1430. rqst.rq_nvec = 1;
  1431. rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
  1432. cifs_small_buf_release(req);
  1433. if (rc)
  1434. cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
  1435. return rc;
  1436. }
static struct create_durable *
create_durable_buf(void)
{
        struct create_durable *buf;

        buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->ccontext.DataOffset = cpu_to_le16(offsetof
                                        (struct create_durable, Data));
        buf->ccontext.DataLength = cpu_to_le32(16);
        buf->ccontext.NameOffset = cpu_to_le16(offsetof
                                (struct create_durable, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
        /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
        buf->Name[0] = 'D';
        buf->Name[1] = 'H';
        buf->Name[2] = 'n';
        buf->Name[3] = 'Q';
        return buf;
}
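/*
 * Layout reminder (informal, sizes taken from the assignments above): every
 * create context built in this file starts with a struct create_context
 * header (Next, NameOffset/NameLength, DataOffset/DataLength), followed by
 * the 4-byte tag naming the context ("DHnQ" here) and then the payload
 * (16 bytes of durable-handle data in this case). The server matches contexts
 * by tag, so those four bytes are what distinguish DHnQ/DHnC from the v2
 * DH2Q/DH2C and the "RqLs" lease context used elsewhere in this file.
 */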
  1457. static struct create_durable *
  1458. create_reconnect_durable_buf(struct cifs_fid *fid)
  1459. {
  1460. struct create_durable *buf;
  1461. buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
  1462. if (!buf)
  1463. return NULL;
  1464. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1465. (struct create_durable, Data));
  1466. buf->ccontext.DataLength = cpu_to_le32(16);
  1467. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1468. (struct create_durable, Name));
  1469. buf->ccontext.NameLength = cpu_to_le16(4);
  1470. buf->Data.Fid.PersistentFileId = fid->persistent_fid;
  1471. buf->Data.Fid.VolatileFileId = fid->volatile_fid;
  1472. /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
  1473. buf->Name[0] = 'D';
  1474. buf->Name[1] = 'H';
  1475. buf->Name[2] = 'n';
  1476. buf->Name[3] = 'C';
  1477. return buf;
  1478. }
  1479. static __u8
  1480. parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
  1481. unsigned int *epoch, char *lease_key)
  1482. {
  1483. char *data_offset;
  1484. struct create_context *cc;
  1485. unsigned int next;
  1486. unsigned int remaining;
  1487. char *name;
  1488. data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
  1489. remaining = le32_to_cpu(rsp->CreateContextsLength);
  1490. cc = (struct create_context *)data_offset;
  1491. while (remaining >= sizeof(struct create_context)) {
  1492. name = le16_to_cpu(cc->NameOffset) + (char *)cc;
  1493. if (le16_to_cpu(cc->NameLength) == 4 &&
  1494. strncmp(name, "RqLs", 4) == 0)
  1495. return server->ops->parse_lease_buf(cc, epoch,
  1496. lease_key);
  1497. next = le32_to_cpu(cc->Next);
  1498. if (!next)
  1499. break;
  1500. remaining -= next;
  1501. cc = (struct create_context *)((char *)cc + next);
  1502. }
  1503. return 0;
  1504. }
static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
                  unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
{
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;

        iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = server->vals->create_lease_size;
        req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
        if (!req->CreateContextsOffset)
                req->CreateContextsOffset = cpu_to_le32(
                                sizeof(struct smb2_create_req) +
                                iov[num - 1].iov_len);
        le32_add_cpu(&req->CreateContextsLength,
                     server->vals->create_lease_size);
        *num_iovec = num + 1;
        return 0;
}
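/*
 * How the add_*_context() helpers chain contexts (a sketch, not normative):
 * the first context added sets req->CreateContextsOffset to point just past
 * the fixed create request plus the file-name iovec; each later context is
 * only reachable because the caller (see SMB2_open_init) patches the previous
 * context's Next field to that context's length; and CreateContextsLength is
 * bumped with le32_add_cpu() for every context so the total stays in sync.
 */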
  1525. static struct create_durable_v2 *
  1526. create_durable_v2_buf(struct cifs_fid *pfid)
  1527. {
  1528. struct create_durable_v2 *buf;
  1529. buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
  1530. if (!buf)
  1531. return NULL;
  1532. buf->ccontext.DataOffset = cpu_to_le16(offsetof
  1533. (struct create_durable_v2, dcontext));
  1534. buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
  1535. buf->ccontext.NameOffset = cpu_to_le16(offsetof
  1536. (struct create_durable_v2, Name));
  1537. buf->ccontext.NameLength = cpu_to_le16(4);
  1538. buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
  1539. buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
  1540. generate_random_uuid(buf->dcontext.CreateGuid);
  1541. memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
  1542. /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
  1543. buf->Name[0] = 'D';
  1544. buf->Name[1] = 'H';
  1545. buf->Name[2] = '2';
  1546. buf->Name[3] = 'Q';
  1547. return buf;
  1548. }
  1549. static struct create_durable_handle_reconnect_v2 *
  1550. create_reconnect_durable_v2_buf(struct cifs_fid *fid)
  1551. {
  1552. struct create_durable_handle_reconnect_v2 *buf;
  1553. buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
  1554. GFP_KERNEL);
  1555. if (!buf)
  1556. return NULL;
  1557. buf->ccontext.DataOffset =
  1558. cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
  1559. dcontext));
  1560. buf->ccontext.DataLength =
  1561. cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
  1562. buf->ccontext.NameOffset =
  1563. cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
  1564. Name));
  1565. buf->ccontext.NameLength = cpu_to_le16(4);
  1566. buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
  1567. buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
  1568. buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
  1569. memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
  1570. /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
  1571. buf->Name[0] = 'D';
  1572. buf->Name[1] = 'H';
  1573. buf->Name[2] = '2';
  1574. buf->Name[3] = 'C';
  1575. return buf;
  1576. }
  1577. static int
  1578. add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
  1579. struct cifs_open_parms *oparms)
  1580. {
  1581. struct smb2_create_req *req = iov[0].iov_base;
  1582. unsigned int num = *num_iovec;
  1583. iov[num].iov_base = create_durable_v2_buf(oparms->fid);
  1584. if (iov[num].iov_base == NULL)
  1585. return -ENOMEM;
  1586. iov[num].iov_len = sizeof(struct create_durable_v2);
  1587. if (!req->CreateContextsOffset)
  1588. req->CreateContextsOffset =
  1589. cpu_to_le32(sizeof(struct smb2_create_req) +
  1590. iov[1].iov_len);
  1591. le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
  1592. *num_iovec = num + 1;
  1593. return 0;
  1594. }
  1595. static int
  1596. add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
  1597. struct cifs_open_parms *oparms)
  1598. {
  1599. struct smb2_create_req *req = iov[0].iov_base;
  1600. unsigned int num = *num_iovec;
  1601. /* indicate that we don't need to relock the file */
  1602. oparms->reconnect = false;
  1603. iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
  1604. if (iov[num].iov_base == NULL)
  1605. return -ENOMEM;
  1606. iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
  1607. if (!req->CreateContextsOffset)
  1608. req->CreateContextsOffset =
  1609. cpu_to_le32(sizeof(struct smb2_create_req) +
  1610. iov[1].iov_len);
  1611. le32_add_cpu(&req->CreateContextsLength,
  1612. sizeof(struct create_durable_handle_reconnect_v2));
  1613. *num_iovec = num + 1;
  1614. return 0;
  1615. }
static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
                    struct cifs_open_parms *oparms, bool use_persistent)
{
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;

        if (use_persistent) {
                if (oparms->reconnect)
                        return add_durable_reconnect_v2_context(iov, num_iovec,
                                                                oparms);
                else
                        return add_durable_v2_context(iov, num_iovec, oparms);
        }

        if (oparms->reconnect) {
                iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
                /* indicate that we don't need to relock the file */
                oparms->reconnect = false;
        } else
                iov[num].iov_base = create_durable_buf();
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_durable);
        if (!req->CreateContextsOffset)
                req->CreateContextsOffset =
                        cpu_to_le32(sizeof(struct smb2_create_req) +
                                                        iov[1].iov_len);
        le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
        *num_iovec = num + 1;
        return 0;
}
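/*
 * Summary of which durable context ends up on the wire (derived from the
 * branches above):
 *
 *      use_persistent, new open        -> DH2Q (create_durable_v2_buf)
 *      use_persistent, reconnect       -> DH2C (create_reconnect_durable_v2_buf)
 *      !use_persistent, new open       -> DHnQ (create_durable_buf)
 *      !use_persistent, reconnect      -> DHnC (create_reconnect_durable_buf)
 *
 * The reconnect variants also clear oparms->reconnect so the caller knows the
 * file does not need to be relocked.
 */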
/* See MS-SMB2 2.2.13.2.7 */
static struct crt_twarp_ctxt *
create_twarp_buf(__u64 timewarp)
{
        struct crt_twarp_ctxt *buf;

        buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->ccontext.DataOffset = cpu_to_le16(offsetof
                                        (struct crt_twarp_ctxt, Timestamp));
        buf->ccontext.DataLength = cpu_to_le32(8);
        buf->ccontext.NameOffset = cpu_to_le16(offsetof
                                (struct crt_twarp_ctxt, Name));
        buf->ccontext.NameLength = cpu_to_le16(4);
        /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
        buf->Name[0] = 'T';
        buf->Name[1] = 'W';
        buf->Name[2] = 'r';
        buf->Name[3] = 'p';
        buf->Timestamp = cpu_to_le64(timewarp);
        return buf;
}

/* See MS-SMB2 2.2.13.2.7 */
static int
add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
{
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;

        iov[num].iov_base = create_twarp_buf(timewarp);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
        if (!req->CreateContextsOffset)
                req->CreateContextsOffset = cpu_to_le32(
                                sizeof(struct smb2_create_req) +
                                iov[num - 1].iov_len);
        le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
        *num_iovec = num + 1;
        return 0;
}
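/*
 * The TWrp payload is just an 8-byte NT timestamp: SMB2_open_init() passes
 * tcon->snapshot_time here, so when it is non-zero the create is aimed at the
 * share's previous version for that point in time rather than the live file.
 * (The exact snapshot semantics are the server's; see MS-SMB2 2.2.13.2.7
 * referenced above.)
 */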
static int
alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
                            const char *treename, const __le16 *path)
{
        int treename_len, path_len;
        struct nls_table *cp;
        const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};

        /*
         * skip leading "\\"
         */
        treename_len = strlen(treename);
        if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
                return -EINVAL;
        treename += 2;
        treename_len -= 2;

        path_len = UniStrnlen((wchar_t *)path, PATH_MAX);

        /*
         * make room for one path separator between the treename and
         * path
         */
        *out_len = treename_len + 1 + path_len;

        /*
         * final path needs to be null-terminated UTF16 with a
         * size aligned to 8
         */
        *out_size = roundup((*out_len+1)*2, 8);
        *out_path = kzalloc(*out_size, GFP_KERNEL);
        if (!*out_path)
                return -ENOMEM;

        cp = load_nls_default();
        cifs_strtoUTF16(*out_path, treename, treename_len, cp);
        UniStrcat(*out_path, sep);
        UniStrcat(*out_path, path);
        unload_nls(cp);

        return 0;
}
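/*
 * Worked example (hypothetical names, only to illustrate the arithmetic):
 * with treename "\\srv\share" and UTF-16 path "dir\file", the leading "\\"
 * is dropped (treename_len = 9) and path_len = 8, so
 * *out_len = 9 + 1 + 8 = 18 and *out_size = roundup((18 + 1) * 2, 8) = 40,
 * yielding the DFS-style name "srv\share\dir\file" in a NUL-terminated,
 * 8-byte-aligned UTF-16 buffer.
 */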
  1722. int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
  1723. umode_t mode, struct cifs_tcon *tcon,
  1724. const char *full_path,
  1725. struct cifs_sb_info *cifs_sb)
  1726. {
  1727. struct smb_rqst rqst;
  1728. struct smb2_create_req *req;
  1729. struct smb2_create_rsp *rsp = NULL;
  1730. struct TCP_Server_Info *server;
  1731. struct cifs_ses *ses = tcon->ses;
  1732. struct kvec iov[3]; /* make sure at least one for each open context */
  1733. struct kvec rsp_iov = {NULL, 0};
  1734. int resp_buftype;
  1735. int uni_path_len;
  1736. __le16 *copy_path = NULL;
  1737. int copy_size;
  1738. int rc = 0;
  1739. unsigned int n_iov = 2;
  1740. __u32 file_attributes = 0;
  1741. char *pc_buf = NULL;
  1742. int flags = 0;
  1743. unsigned int total_len;
  1744. __le16 *utf16_path = NULL;
  1745. cifs_dbg(FYI, "mkdir\n");
  1746. /* resource #1: path allocation */
  1747. utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
  1748. if (!utf16_path)
  1749. return -ENOMEM;
  1750. if (ses && (ses->server))
  1751. server = ses->server;
  1752. else {
  1753. rc = -EIO;
  1754. goto err_free_path;
  1755. }
  1756. /* resource #2: request */
  1757. rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
  1758. if (rc)
  1759. goto err_free_path;
  1760. if (smb3_encryption_required(tcon))
  1761. flags |= CIFS_TRANSFORM_REQ;
  1762. req->ImpersonationLevel = IL_IMPERSONATION;
  1763. req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
  1764. /* File attributes ignored on open (used in create though) */
  1765. req->FileAttributes = cpu_to_le32(file_attributes);
  1766. req->ShareAccess = FILE_SHARE_ALL_LE;
  1767. req->CreateDisposition = cpu_to_le32(FILE_CREATE);
  1768. req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
  1769. iov[0].iov_base = (char *)req;
  1770. /* -1 since last byte is buf[0] which is sent below (path) */
  1771. iov[0].iov_len = total_len - 1;
  1772. req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
  1773. /* [MS-SMB2] 2.2.13 NameOffset:
  1774. * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
  1775. * the SMB2 header, the file name includes a prefix that will
  1776. * be processed during DFS name normalization as specified in
  1777. * section 3.3.5.9. Otherwise, the file name is relative to
  1778. * the share that is identified by the TreeId in the SMB2
  1779. * header.
  1780. */
  1781. if (tcon->share_flags & SHI1005_FLAGS_DFS) {
  1782. int name_len;
  1783. req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
  1784. rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
  1785. &name_len,
  1786. tcon->treeName, utf16_path);
  1787. if (rc)
  1788. goto err_free_req;
  1789. req->NameLength = cpu_to_le16(name_len * 2);
  1790. uni_path_len = copy_size;
  1791. /* free before overwriting resource */
  1792. kfree(utf16_path);
  1793. utf16_path = copy_path;
  1794. } else {
  1795. uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
  1796. /* MUST set path len (NameLength) to 0 opening root of share */
  1797. req->NameLength = cpu_to_le16(uni_path_len - 2);
  1798. if (uni_path_len % 8 != 0) {
  1799. copy_size = roundup(uni_path_len, 8);
  1800. copy_path = kzalloc(copy_size, GFP_KERNEL);
  1801. if (!copy_path) {
  1802. rc = -ENOMEM;
  1803. goto err_free_req;
  1804. }
  1805. memcpy((char *)copy_path, (const char *)utf16_path,
  1806. uni_path_len);
  1807. uni_path_len = copy_size;
  1808. /* free before overwriting resource */
  1809. kfree(utf16_path);
  1810. utf16_path = copy_path;
  1811. }
  1812. }
  1813. iov[1].iov_len = uni_path_len;
  1814. iov[1].iov_base = utf16_path;
  1815. req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
  1816. if (tcon->posix_extensions) {
  1817. /* resource #3: posix buf */
  1818. rc = add_posix_context(iov, &n_iov, mode);
  1819. if (rc)
  1820. goto err_free_req;
  1821. pc_buf = iov[n_iov-1].iov_base;
  1822. }
  1823. memset(&rqst, 0, sizeof(struct smb_rqst));
  1824. rqst.rq_iov = iov;
  1825. rqst.rq_nvec = n_iov;
  1826. /* resource #4: response buffer */
  1827. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
  1828. if (rc) {
  1829. cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
  1830. trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
  1831. CREATE_NOT_FILE,
  1832. FILE_WRITE_ATTRIBUTES, rc);
  1833. goto err_free_rsp_buf;
  1834. }
  1835. rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
  1836. trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
  1837. ses->Suid, CREATE_NOT_FILE,
  1838. FILE_WRITE_ATTRIBUTES);
  1839. SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
        /* Eventually save off posix specific response info and timestamps */
  1841. err_free_rsp_buf:
  1842. free_rsp_buf(resp_buftype, rsp);
  1843. kfree(pc_buf);
  1844. err_free_req:
  1845. cifs_small_buf_release(req);
  1846. err_free_path:
  1847. kfree(utf16_path);
  1848. return rc;
  1849. }
  1850. int
  1851. SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
  1852. struct cifs_open_parms *oparms, __le16 *path)
  1853. {
  1854. struct TCP_Server_Info *server = tcon->ses->server;
  1855. struct smb2_create_req *req;
  1856. unsigned int n_iov = 2;
  1857. __u32 file_attributes = 0;
  1858. int copy_size;
  1859. int uni_path_len;
  1860. unsigned int total_len;
  1861. struct kvec *iov = rqst->rq_iov;
  1862. __le16 *copy_path;
  1863. int rc;
  1864. rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
  1865. if (rc)
  1866. return rc;
  1867. iov[0].iov_base = (char *)req;
  1868. /* -1 since last byte is buf[0] which is sent below (path) */
  1869. iov[0].iov_len = total_len - 1;
  1870. if (oparms->create_options & CREATE_OPTION_READONLY)
  1871. file_attributes |= ATTR_READONLY;
  1872. if (oparms->create_options & CREATE_OPTION_SPECIAL)
  1873. file_attributes |= ATTR_SYSTEM;
  1874. req->ImpersonationLevel = IL_IMPERSONATION;
  1875. req->DesiredAccess = cpu_to_le32(oparms->desired_access);
  1876. /* File attributes ignored on open (used in create though) */
  1877. req->FileAttributes = cpu_to_le32(file_attributes);
  1878. req->ShareAccess = FILE_SHARE_ALL_LE;
  1879. req->CreateDisposition = cpu_to_le32(oparms->disposition);
  1880. req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
  1881. req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
  1882. /* [MS-SMB2] 2.2.13 NameOffset:
  1883. * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
  1884. * the SMB2 header, the file name includes a prefix that will
  1885. * be processed during DFS name normalization as specified in
  1886. * section 3.3.5.9. Otherwise, the file name is relative to
  1887. * the share that is identified by the TreeId in the SMB2
  1888. * header.
  1889. */
  1890. if (tcon->share_flags & SHI1005_FLAGS_DFS) {
  1891. int name_len;
  1892. req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
  1893. rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
  1894. &name_len,
  1895. tcon->treeName, path);
  1896. if (rc)
  1897. return rc;
  1898. req->NameLength = cpu_to_le16(name_len * 2);
  1899. uni_path_len = copy_size;
  1900. path = copy_path;
  1901. } else {
  1902. uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
  1903. /* MUST set path len (NameLength) to 0 opening root of share */
  1904. req->NameLength = cpu_to_le16(uni_path_len - 2);
  1905. copy_size = uni_path_len;
  1906. if (copy_size % 8 != 0)
  1907. copy_size = roundup(copy_size, 8);
  1908. copy_path = kzalloc(copy_size, GFP_KERNEL);
  1909. if (!copy_path)
  1910. return -ENOMEM;
  1911. memcpy((char *)copy_path, (const char *)path,
  1912. uni_path_len);
  1913. uni_path_len = copy_size;
  1914. path = copy_path;
  1915. }
  1916. iov[1].iov_len = uni_path_len;
  1917. iov[1].iov_base = path;
  1918. if ((!server->oplocks) || (tcon->no_lease))
  1919. *oplock = SMB2_OPLOCK_LEVEL_NONE;
  1920. if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
  1921. *oplock == SMB2_OPLOCK_LEVEL_NONE)
  1922. req->RequestedOplockLevel = *oplock;
  1923. else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
  1924. (oparms->create_options & CREATE_NOT_FILE))
  1925. req->RequestedOplockLevel = *oplock; /* no srv lease support */
  1926. else {
  1927. rc = add_lease_context(server, iov, &n_iov,
  1928. oparms->fid->lease_key, oplock);
  1929. if (rc)
  1930. return rc;
  1931. }
  1932. if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
  1933. /* need to set Next field of lease context if we request it */
  1934. if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
  1935. struct create_context *ccontext =
  1936. (struct create_context *)iov[n_iov-1].iov_base;
  1937. ccontext->Next =
  1938. cpu_to_le32(server->vals->create_lease_size);
  1939. }
  1940. rc = add_durable_context(iov, &n_iov, oparms,
  1941. tcon->use_persistent);
  1942. if (rc)
  1943. return rc;
  1944. }
  1945. if (tcon->posix_extensions) {
  1946. if (n_iov > 2) {
  1947. struct create_context *ccontext =
  1948. (struct create_context *)iov[n_iov-1].iov_base;
  1949. ccontext->Next =
  1950. cpu_to_le32(iov[n_iov-1].iov_len);
  1951. }
  1952. rc = add_posix_context(iov, &n_iov, oparms->mode);
  1953. if (rc)
  1954. return rc;
  1955. }
  1956. if (tcon->snapshot_time) {
  1957. cifs_dbg(FYI, "adding snapshot context\n");
  1958. if (n_iov > 2) {
  1959. struct create_context *ccontext =
  1960. (struct create_context *)iov[n_iov-1].iov_base;
  1961. ccontext->Next =
  1962. cpu_to_le32(iov[n_iov-1].iov_len);
  1963. }
  1964. rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
  1965. if (rc)
  1966. return rc;
  1967. }
  1968. rqst->rq_nvec = n_iov;
  1969. return 0;
  1970. }
/* rq_iov[0] is the request and is released by cifs_small_buf_release().
 * All other vectors are freed by kfree().
 */
void
SMB2_open_free(struct smb_rqst *rqst)
{
        int i;

        if (rqst && rqst->rq_iov) {
                cifs_small_buf_release(rqst->rq_iov[0].iov_base);
                for (i = 1; i < rqst->rq_nvec; i++)
                        if (rqst->rq_iov[i].iov_base != smb2_padding)
                                kfree(rqst->rq_iov[i].iov_base);
        }
}
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
{
        struct smb_rqst rqst;
        struct smb2_create_rsp *rsp = NULL;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon = oparms->tcon;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[SMB2_CREATE_IOV_SIZE];
        struct kvec rsp_iov = {NULL, 0};
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;

        cifs_dbg(FYI, "create/open\n");
        if (ses && (ses->server))
                server = ses->server;
        else
                return -EIO;

        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        memset(&iov, 0, sizeof(iov));
        rqst.rq_iov = iov;
        rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;

        rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
        if (rc)
                goto creat_exit;

        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;

        if (rc != 0) {
                cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
                if (err_iov && rsp) {
                        *err_iov = rsp_iov;
                        *buftype = resp_buftype;
                        resp_buftype = CIFS_NO_BUFFER;
                        rsp = NULL;
                }
                trace_smb3_open_err(xid, tcon->tid, ses->Suid,
                                    oparms->create_options, oparms->desired_access, rc);
                goto creat_exit;
        } else
                trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid,
                                     ses->Suid, oparms->create_options,
                                     oparms->desired_access);

        oparms->fid->persistent_fid = rsp->PersistentFileId;
        oparms->fid->volatile_fid = rsp->VolatileFileId;

        if (buf) {
                memcpy(buf, &rsp->CreationTime, 32);
                buf->AllocationSize = rsp->AllocationSize;
                buf->EndOfFile = rsp->EndofFile;
                buf->Attributes = rsp->FileAttributes;
                buf->NumberOfLinks = cpu_to_le32(1);
                buf->DeletePending = 0;
        }

        if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
                *oplock = parse_lease_state(server, rsp, &oparms->fid->epoch,
                                            oparms->fid->lease_key);
        else
                *oplock = rsp->OplockLevel;
creat_exit:
        SMB2_open_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
        return rc;
}
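/*
 * Typical caller shape (an illustrative sketch only; the access, disposition
 * and create-option values are the ones smb311_posix_mkdir() above uses, and
 * the remaining cifs_open_parms fields are left at their defaults):
 *
 *      struct cifs_fid fid = {};
 *      struct cifs_open_parms oparms = {
 *              .tcon = tcon,
 *              .desired_access = FILE_WRITE_ATTRIBUTES,
 *              .disposition = FILE_CREATE,
 *              .create_options = CREATE_NOT_FILE,
 *              .fid = &fid,
 *      };
 *      __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
 *
 *      rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
 *      if (rc == 0)
 *              SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 */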
  2052. /*
  2053. * SMB2 IOCTL is used for both IOCTLs and FSCTLs
  2054. */
  2055. int
  2056. SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
  2057. u64 volatile_fid, u32 opcode, bool is_fsctl,
  2058. char *in_data, u32 indatalen,
  2059. char **out_data, u32 *plen /* returned data len */)
  2060. {
  2061. struct smb_rqst rqst;
  2062. struct smb2_ioctl_req *req;
  2063. struct smb2_ioctl_rsp *rsp;
  2064. struct cifs_ses *ses;
  2065. struct kvec iov[2];
  2066. struct kvec rsp_iov;
  2067. int resp_buftype;
  2068. int n_iov;
  2069. int rc = 0;
  2070. int flags = 0;
  2071. unsigned int total_len;
  2072. cifs_dbg(FYI, "SMB2 IOCTL\n");
  2073. if (out_data != NULL)
  2074. *out_data = NULL;
  2075. /* zero out returned data len, in case of error */
  2076. if (plen)
  2077. *plen = 0;
  2078. if (tcon)
  2079. ses = tcon->ses;
  2080. else
  2081. return -EIO;
  2082. if (!ses || !(ses->server))
  2083. return -EIO;
  2084. rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
  2085. if (rc)
  2086. return rc;
  2087. if (smb3_encryption_required(tcon))
  2088. flags |= CIFS_TRANSFORM_REQ;
  2089. req->CtlCode = cpu_to_le32(opcode);
  2090. req->PersistentFileId = persistent_fid;
  2091. req->VolatileFileId = volatile_fid;
  2092. if (indatalen) {
  2093. req->InputCount = cpu_to_le32(indatalen);
  2094. /* do not set InputOffset if no input data */
  2095. req->InputOffset =
  2096. cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
  2097. iov[1].iov_base = in_data;
  2098. iov[1].iov_len = indatalen;
  2099. n_iov = 2;
  2100. } else
  2101. n_iov = 1;
  2102. req->OutputOffset = 0;
  2103. req->OutputCount = 0; /* MBZ */
        /*
         * Could increase MaxOutputResponse, but that would require more
         * than one credit. Windows typically sets this smaller, but for some
         * ioctls it may be useful to allow the server to send more. There is
         * no point limiting what the server can send as long as it fits in
         * one credit. Unfortunately we can not handle more than
         * CIFS_MAX_MSG_SIZE (by default; note that it can be overridden to
         * make the max larger) in responses, except for read responses which
         * can be bigger. We may want to bump this limit up.
         */
  2114. req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
  2115. if (is_fsctl)
  2116. req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
  2117. else
  2118. req->Flags = 0;
  2119. iov[0].iov_base = (char *)req;
  2120. /*
  2121. * If no input data, the size of ioctl struct in
  2122. * protocol spec still includes a 1 byte data buffer,
  2123. * but if input data passed to ioctl, we do not
  2124. * want to double count this, so we do not send
  2125. * the dummy one byte of data in iovec[0] if sending
  2126. * input data (in iovec[1]).
  2127. */
  2128. if (indatalen) {
  2129. iov[0].iov_len = total_len - 1;
  2130. } else
  2131. iov[0].iov_len = total_len;
  2132. /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
  2133. if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
  2134. req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
  2135. memset(&rqst, 0, sizeof(struct smb_rqst));
  2136. rqst.rq_iov = iov;
  2137. rqst.rq_nvec = n_iov;
  2138. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
  2139. &rsp_iov);
  2140. cifs_small_buf_release(req);
  2141. rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
  2142. if (rc != 0)
  2143. trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
  2144. ses->Suid, 0, opcode, rc);
  2145. if ((rc != 0) && (rc != -EINVAL)) {
  2146. cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
  2147. goto ioctl_exit;
  2148. } else if (rc == -EINVAL) {
  2149. if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
  2150. (opcode != FSCTL_SRV_COPYCHUNK)) {
  2151. cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
  2152. goto ioctl_exit;
  2153. }
  2154. }
  2155. /* check if caller wants to look at return data or just return rc */
  2156. if ((plen == NULL) || (out_data == NULL))
  2157. goto ioctl_exit;
  2158. *plen = le32_to_cpu(rsp->OutputCount);
  2159. /* We check for obvious errors in the output buffer length and offset */
  2160. if (*plen == 0)
  2161. goto ioctl_exit; /* server returned no data */
  2162. else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
  2163. cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
  2164. *plen = 0;
  2165. rc = -EIO;
  2166. goto ioctl_exit;
  2167. }
  2168. if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
  2169. cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
  2170. le32_to_cpu(rsp->OutputOffset));
  2171. *plen = 0;
  2172. rc = -EIO;
  2173. goto ioctl_exit;
  2174. }
  2175. *out_data = kmalloc(*plen, GFP_KERNEL);
  2176. if (*out_data == NULL) {
  2177. rc = -ENOMEM;
  2178. goto ioctl_exit;
  2179. }
  2180. memcpy(*out_data, (char *)rsp + le32_to_cpu(rsp->OutputOffset), *plen);
  2181. ioctl_exit:
  2182. free_rsp_buf(resp_buftype, rsp);
  2183. return rc;
  2184. }
  2185. /*
  2186. * Individual callers to ioctl worker function follow
  2187. */
  2188. int
  2189. SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
  2190. u64 persistent_fid, u64 volatile_fid)
  2191. {
  2192. int rc;
  2193. struct compress_ioctl fsctl_input;
  2194. char *ret_data = NULL;
  2195. fsctl_input.CompressionState =
  2196. cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
  2197. rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
  2198. FSCTL_SET_COMPRESSION, true /* is_fsctl */,
  2199. (char *)&fsctl_input /* data input */,
  2200. 2 /* in data len */, &ret_data /* out data */, NULL);
  2201. cifs_dbg(FYI, "set compression rc %d\n", rc);
  2202. return rc;
  2203. }
  2204. int
  2205. SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
  2206. u64 persistent_fid, u64 volatile_fid)
  2207. {
  2208. struct smb2_close_req *req;
  2209. struct kvec *iov = rqst->rq_iov;
  2210. unsigned int total_len;
  2211. int rc;
  2212. rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
  2213. if (rc)
  2214. return rc;
  2215. req->PersistentFileId = persistent_fid;
  2216. req->VolatileFileId = volatile_fid;
  2217. iov[0].iov_base = (char *)req;
  2218. iov[0].iov_len = total_len;
  2219. return 0;
  2220. }
  2221. void
  2222. SMB2_close_free(struct smb_rqst *rqst)
  2223. {
  2224. if (rqst && rqst->rq_iov)
  2225. cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
  2226. }
  2227. int
  2228. SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
  2229. u64 persistent_fid, u64 volatile_fid, int flags)
  2230. {
  2231. struct smb_rqst rqst;
  2232. struct smb2_close_rsp *rsp = NULL;
  2233. struct cifs_ses *ses = tcon->ses;
  2234. struct kvec iov[1];
  2235. struct kvec rsp_iov;
  2236. int resp_buftype = CIFS_NO_BUFFER;
  2237. int rc = 0;
  2238. cifs_dbg(FYI, "Close\n");
  2239. if (!ses || !(ses->server))
  2240. return -EIO;
  2241. if (smb3_encryption_required(tcon))
  2242. flags |= CIFS_TRANSFORM_REQ;
  2243. memset(&rqst, 0, sizeof(struct smb_rqst));
  2244. memset(&iov, 0, sizeof(iov));
  2245. rqst.rq_iov = iov;
  2246. rqst.rq_nvec = 1;
  2247. rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid);
  2248. if (rc)
  2249. goto close_exit;
  2250. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
  2251. rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
  2252. if (rc != 0) {
  2253. cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
  2254. trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
  2255. rc);
  2256. goto close_exit;
  2257. }
  2258. /* BB FIXME - decode close response, update inode for caching */
  2259. close_exit:
  2260. SMB2_close_free(&rqst);
  2261. free_rsp_buf(resp_buftype, rsp);
  2262. return rc;
  2263. }
  2264. int
  2265. SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
  2266. u64 persistent_fid, u64 volatile_fid)
  2267. {
  2268. int rc;
  2269. int tmp_rc;
  2270. rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
  2271. /* retry close in a worker thread if this one is interrupted */
  2272. if (rc == -EINTR) {
  2273. tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
  2274. volatile_fid);
  2275. if (tmp_rc)
  2276. cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
  2277. persistent_fid, tmp_rc);
  2278. }
  2279. return rc;
  2280. }
int
smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
                  struct kvec *iov, unsigned int min_buf_size)
{
        unsigned int smb_len = iov->iov_len;
        char *end_of_smb = smb_len + (char *)iov->iov_base;
        char *begin_of_buf = offset + (char *)iov->iov_base;
        char *end_of_buf = begin_of_buf + buffer_length;

        if (buffer_length < min_buf_size) {
                cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
                         buffer_length, min_buf_size);
                return -EINVAL;
        }

        /* check if beyond RFC1001 maximum length */
        if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
                cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
                         buffer_length, smb_len);
                return -EINVAL;
        }

        if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
                cifs_dbg(VFS, "illegal server response, bad offset to data\n");
                return -EINVAL;
        }

        return 0;
}
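/*
 * Example of the bounds check above (numbers made up for illustration,
 * assuming they satisfy min_buf_size): for a response kvec with iov_len
 * 0x100, an offset of 0x90 with buffer_length 0x80 is rejected because
 * end_of_buf (base + 0x110) lands past end_of_smb (base + 0x100), whereas
 * offset 0x90 with buffer_length 0x70 exactly fills the remainder of the
 * response and passes.
 */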
/*
 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
 * Caller must free buffer.
 */
static int
validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
                      struct kvec *iov, unsigned int minbufsize,
                      char *data)
{
        char *begin_of_buf = offset + (char *)iov->iov_base;
        int rc;

        if (!data)
                return -EINVAL;

        rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
        if (rc)
                return rc;

        memcpy(data, begin_of_buf, buffer_length);

        return 0;
}
  2325. int
  2326. SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
  2327. u64 persistent_fid, u64 volatile_fid,
  2328. u8 info_class, u8 info_type, u32 additional_info,
  2329. size_t output_len)
  2330. {
  2331. struct smb2_query_info_req *req;
  2332. struct kvec *iov = rqst->rq_iov;
  2333. unsigned int total_len;
  2334. int rc;
  2335. rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
  2336. &total_len);
  2337. if (rc)
  2338. return rc;
  2339. req->InfoType = info_type;
  2340. req->FileInfoClass = info_class;
  2341. req->PersistentFileId = persistent_fid;
  2342. req->VolatileFileId = volatile_fid;
  2343. req->AdditionalInformation = cpu_to_le32(additional_info);
  2344. /*
  2345. * We do not use the input buffer (do not send extra byte)
  2346. */
  2347. req->InputBufferOffset = 0;
  2348. req->OutputBufferLength = cpu_to_le32(output_len);
  2349. iov[0].iov_base = (char *)req;
  2350. /* 1 for Buffer */
  2351. iov[0].iov_len = total_len - 1;
  2352. return 0;
  2353. }
  2354. void
  2355. SMB2_query_info_free(struct smb_rqst *rqst)
  2356. {
  2357. if (rqst && rqst->rq_iov)
  2358. cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
  2359. }
  2360. static int
  2361. query_info(const unsigned int xid, struct cifs_tcon *tcon,
  2362. u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
  2363. u32 additional_info, size_t output_len, size_t min_len, void **data,
  2364. u32 *dlen)
  2365. {
  2366. struct smb_rqst rqst;
  2367. struct smb2_query_info_rsp *rsp = NULL;
  2368. struct kvec iov[1];
  2369. struct kvec rsp_iov;
  2370. int rc = 0;
  2371. int resp_buftype = CIFS_NO_BUFFER;
  2372. struct cifs_ses *ses = tcon->ses;
  2373. int flags = 0;
  2374. cifs_dbg(FYI, "Query Info\n");
  2375. if (!ses || !(ses->server))
  2376. return -EIO;
  2377. if (smb3_encryption_required(tcon))
  2378. flags |= CIFS_TRANSFORM_REQ;
  2379. memset(&rqst, 0, sizeof(struct smb_rqst));
  2380. memset(&iov, 0, sizeof(iov));
  2381. rqst.rq_iov = iov;
  2382. rqst.rq_nvec = 1;
  2383. rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
  2384. info_class, info_type, additional_info,
  2385. output_len);
  2386. if (rc)
  2387. goto qinf_exit;
  2388. rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
  2389. rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
  2390. if (rc) {
  2391. cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
  2392. trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
  2393. ses->Suid, info_class, (__u32)info_type, rc);
  2394. goto qinf_exit;
  2395. }
  2396. if (dlen) {
  2397. *dlen = le32_to_cpu(rsp->OutputBufferLength);
  2398. if (!*data) {
  2399. *data = kmalloc(*dlen, GFP_KERNEL);
  2400. if (!*data) {
  2401. cifs_dbg(VFS,
  2402. "Error %d allocating memory for acl\n",
  2403. rc);
  2404. *dlen = 0;
  2405. goto qinf_exit;
  2406. }
  2407. }
  2408. }
  2409. rc = validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
  2410. le32_to_cpu(rsp->OutputBufferLength),
  2411. &rsp_iov, min_len, *data);
  2412. qinf_exit:
  2413. SMB2_query_info_free(&rqst);
  2414. free_rsp_buf(resp_buftype, rsp);
  2415. return rc;
  2416. }
  2417. int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
  2418. u64 persistent_fid, u64 volatile_fid,
  2419. int ea_buf_size, struct smb2_file_full_ea_info *data)
  2420. {
  2421. return query_info(xid, tcon, persistent_fid, volatile_fid,
  2422. FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
  2423. ea_buf_size,
  2424. sizeof(struct smb2_file_full_ea_info),
  2425. (void **)&data,
  2426. NULL);
  2427. }
  2428. int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
  2429. u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
  2430. {
  2431. return query_info(xid, tcon, persistent_fid, volatile_fid,
  2432. FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
  2433. sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
  2434. sizeof(struct smb2_file_all_info), (void **)&data,
  2435. NULL);
  2436. }
  2437. int
  2438. SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
  2439. u64 persistent_fid, u64 volatile_fid,
  2440. void **data, u32 *plen)
  2441. {
  2442. __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
  2443. *plen = 0;
  2444. return query_info(xid, tcon, persistent_fid, volatile_fid,
  2445. 0, SMB2_O_INFO_SECURITY, additional_info,
  2446. SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
  2447. }
  2448. int
  2449. SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
  2450. u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
  2451. {
  2452. return query_info(xid, tcon, persistent_fid, volatile_fid,
  2453. FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
  2454. sizeof(struct smb2_file_internal_info),
  2455. sizeof(struct smb2_file_internal_info),
  2456. (void **)&uniqueid, NULL);
  2457. }
  2458. /*
  2459. * This is a no-op for now. We're not really interested in the reply, but
  2460. * rather in the fact that the server sent one and that server->lstrp
  2461. * gets updated.
  2462. *
  2463. * FIXME: maybe we should consider checking that the reply matches request?
  2464. */
  2465. static void
  2466. smb2_echo_callback(struct mid_q_entry *mid)
  2467. {
  2468. struct TCP_Server_Info *server = mid->callback_data;
  2469. struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
  2470. unsigned int credits_received = 0;
  2471. if (mid->mid_state == MID_RESPONSE_RECEIVED
  2472. || mid->mid_state == MID_RESPONSE_MALFORMED)
  2473. credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
  2474. DeleteMidQEntry(mid);
  2475. add_credits(server, credits_received, CIFS_ECHO_OP);
  2476. }
void smb2_reconnect_server(struct work_struct *work)
{
	struct TCP_Server_Info *server = container_of(work,
					struct TCP_Server_Info, reconnect.work);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon, *tcon2;
	struct list_head tmp_list;
	int tcon_exist = false;
	int rc;
	int resched = false;

	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
	mutex_lock(&server->reconnect_mutex);

	INIT_LIST_HEAD(&tmp_list);
	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->need_reconnect || tcon->need_reopen_files) {
				tcon->tc_count++;
				list_add_tail(&tcon->rlist, &tmp_list);
				tcon_exist = true;
			}
		}
		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
			tcon_exist = true;
		}
	}
	/*
	 * Get the reference to server struct to be sure that the last call of
	 * cifs_put_tcon() in the loop below won't release the server pointer.
	 */
	if (tcon_exist)
		server->srv_count++;

	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
		if (!rc)
			cifs_reopen_persistent_handles(tcon);
		else
			resched = true;
		list_del_init(&tcon->rlist);
		cifs_put_tcon(tcon);
	}

	cifs_dbg(FYI, "Reconnecting tcons finished\n");
	if (resched)
		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
	mutex_unlock(&server->reconnect_mutex);

	/* now we can safely release srv struct */
	if (tcon_exist)
		cifs_put_tcp_session(server, 1);
}

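/*
 * Send an asynchronous SMB2 ECHO to keep the connection alive; the reply is
 * handled by smb2_echo_callback(). If the socket still needs to negotiate,
 * skip the echo and kick the reconnect work instead.
 */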
int
SMB2_echo(struct TCP_Server_Info *server)
{
	struct smb2_echo_req *req;
	int rc = 0;
	struct kvec iov[1];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 1 };
	unsigned int total_len;

	cifs_dbg(FYI, "In echo request\n");

	if (server->tcpStatus == CifsNeedNegotiate) {
		/* No need to send echo on newly established connections */
		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
		return rc;
	}

	rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
	if (rc)
		return rc;

	req->sync_hdr.CreditRequest = cpu_to_le16(1);

	iov[0].iov_len = total_len;
	iov[0].iov_base = (char *)req;

	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
			     server, CIFS_ECHO_OP);
	if (rc)
		cifs_dbg(FYI, "Echo request failed: %d\n", rc);

	cifs_small_buf_release(req);
	return rc;
}

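/* Synchronously ask the server to flush any cached data for the open handle. */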
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	   u64 volatile_fid)
{
	struct smb_rqst rqst;
	struct smb2_flush_req *req;
	struct cifs_ses *ses = tcon->ses;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buftype;
	int rc = 0;
	int flags = 0;
	unsigned int total_len;

	cifs_dbg(FYI, "Flush\n");

	if (!ses || !(ses->server))
		return -EIO;

	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
		trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
				     rc);
	}

	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

/*
 * To form a chain of read requests, any read requests after the first should
 * have the end_of_chain boolean set to true.
 */
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
	struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
	unsigned int remaining_bytes, int request_type)
{
	int rc = -EACCES;
	struct smb2_read_plain_req *req = NULL;
	struct smb2_sync_hdr *shdr;
	struct TCP_Server_Info *server;

	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
				 total_len);
	if (rc)
		return rc;

	server = io_parms->tcon->ses->server;
	if (server == NULL)
		return -ECONNABORTED;

	shdr = &req->sync_hdr;
	shdr->ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->ReadChannelInfoOffset = 0; /* reserved */
	req->ReadChannelInfoLength = 0; /* reserved */
	req->Channel = 0; /* reserved */
	req->MinimumCount = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
#ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If we want to do an RDMA write, fill in and append
	 * smbd_buffer_descriptor_v1 to the end of the read request
	 */
	if (server->rdma && rdata && !server->sign &&
	    rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
		struct smbd_buffer_descriptor_v1 *v1;
		bool need_invalidate =
			io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;

		rdata->mr = smbd_register_mr(
				server->smbd_conn, rdata->pages,
				rdata->nr_pages, rdata->page_offset,
				rdata->tailsz, true, need_invalidate);
		if (!rdata->mr)
			return -ENOBUFS;

		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
		if (need_invalidate)
			req->Channel = SMB2_CHANNEL_RDMA_V1;
		req->ReadChannelInfoOffset =
			cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer));
		req->ReadChannelInfoLength =
			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
		v1->token = cpu_to_le32(rdata->mr->mr->rkey);
		v1->length = cpu_to_le32(rdata->mr->mr->length);

		*total_len += sizeof(*v1) - 1;
	}
#endif
	if (request_type & CHAINED_REQUEST) {
		if (!(request_type & END_OF_CHAIN)) {
			/* next 8-byte aligned request */
			*total_len = DIV_ROUND_UP(*total_len, 8) * 8;
			shdr->NextCommand = cpu_to_le32(*total_len);
		} else /* END_OF_CHAIN */
			shdr->NextCommand = 0;
		if (request_type & RELATED_REQUEST) {
			shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
			/*
			 * Related requests use info from previous read request
			 * in chain.
			 */
			shdr->SessionId = 0xFFFFFFFF;
			shdr->TreeId = 0xFFFFFFFF;
			req->PersistentFileId = 0xFFFFFFFF;
			req->VolatileFileId = 0xFFFFFFFF;
		}
	}
	if (remaining_bytes > io_parms->length)
		req->RemainingBytes = cpu_to_le32(remaining_bytes);
	else
		req->RemainingBytes = 0;

	*buf = req;
	return rc;
}

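/*
 * Completion handler for an async read: verifies the signature when required,
 * accounts the bytes read, records credits granted by the server, and queues
 * rdata->work for the caller's completion path.
 */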
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct smb2_sync_hdr *shdr =
				(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
	unsigned int credits_received = 0;
	struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
				 .rq_nvec = 1,
				 .rq_pages = rdata->pages,
				 .rq_offset = rdata->page_offset,
				 .rq_npages = rdata->nr_pages,
				 .rq_pagesz = rdata->pagesz,
				 .rq_tailsz = rdata->tailsz };

	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
		 __func__, mid->mid, mid->mid_state, rdata->result,
		 rdata->bytes);

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(shdr->CreditRequest);
		/* result already set, check signature */
		if (server->sign && !mid->decrypted) {
			int rc;

			rc = smb2_verify_signature(&rqst, server);
			if (rc)
				cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
					 rc);
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		if (server->sign && rdata->got_bytes)
			/* reset bytes number since we can not verify the signature */
			rdata->got_bytes = 0;
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->got_bytes);
		cifs_stats_bytes_read(tcon, rdata->got_bytes);
		break;
	case MID_RESPONSE_MALFORMED:
		credits_received = le16_to_cpu(shdr->CreditRequest);
		/* fall through */
	default:
		if (rdata->result != -ENODATA)
			rdata->result = -EIO;
	}
#ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If this rdata has a memory registered (MR), it can be freed now.
	 * The MR needs to be freed as soon as I/O finishes to prevent deadlock,
	 * because MRs are limited in number and are reused for future I/Os.
	 */
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->result && rdata->result != -ENODATA) {
		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
		trace_smb3_read_err(0 /* xid */,
				    rdata->cfile->fid.persistent_fid,
				    tcon->tid, tcon->ses->Suid, rdata->offset,
				    rdata->bytes, rdata->result);
	} else
		trace_smb3_read_done(0 /* xid */,
				     rdata->cfile->fid.persistent_fid,
				     tcon->tid, tcon->ses->Suid,
				     rdata->offset, rdata->got_bytes);

	queue_work(cifsiod_wq, &rdata->work);
	DeleteMidQEntry(mid);
	add_credits(server, credits_received, 0);
}

/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
	int rc, flags = 0;
	char *buf;
	struct smb2_sync_hdr *shdr;
	struct cifs_io_parms io_parms;
	struct smb_rqst rqst = { .rq_iov = rdata->iov,
				 .rq_nvec = 1 };
	struct TCP_Server_Info *server;
	unsigned int total_len;

	cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
		 __func__, rdata->offset, rdata->bytes);

	io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
	io_parms.offset = rdata->offset;
	io_parms.length = rdata->bytes;
	io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
	io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
	io_parms.pid = rdata->pid;

	server = io_parms.tcon->ses->server;

	rc = smb2_new_read_req(
		(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
	if (rc) {
		if (rc == -EAGAIN && rdata->credits) {
			/* credits were reset by reconnect */
			rdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		return rc;
	}

	if (smb3_encryption_required(io_parms.tcon))
		flags |= CIFS_TRANSFORM_REQ;

	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = total_len;

	shdr = (struct smb2_sync_hdr *)buf;

	if (rdata->credits) {
		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
						SMB2_MAX_BUFFER_SIZE));
		shdr->CreditRequest =
			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
		spin_lock(&server->req_lock);
		server->credits += rdata->credits -
						le16_to_cpu(shdr->CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		rdata->credits = le16_to_cpu(shdr->CreditCharge);
		flags |= CIFS_HAS_CREDITS;
	}

	kref_get(&rdata->refcount);
	rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
			     cifs_readv_receive, smb2_readv_callback,
			     smb3_handle_read_data, rdata, flags);
	if (rc) {
		kref_put(&rdata->refcount, cifs_readdata_release);
		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
				    io_parms.tcon->tid,
				    io_parms.tcon->ses->Suid,
				    io_parms.offset, io_parms.length, rc);
	}

	cifs_small_buf_release(buf);
	return rc;
}

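/*
 * Synchronous read. On success the caller either supplies *buf for the data
 * to be copied into, or receives the response buffer itself (and its type in
 * *buf_type) to avoid an extra copy.
 */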
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
	  unsigned int *nbytes, char **buf, int *buf_type)
{
	struct smb_rqst rqst;
	int resp_buftype, rc = -EACCES;
	struct smb2_read_plain_req *req = NULL;
	struct smb2_read_rsp *rsp = NULL;
	struct kvec iov[1];
	struct kvec rsp_iov;
	unsigned int total_len;
	int flags = CIFS_LOG_ERROR;
	struct cifs_ses *ses = io_parms->tcon->ses;

	*nbytes = 0;
	rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
	if (rc)
		return rc;

	if (smb3_encryption_required(io_parms->tcon))
		flags |= CIFS_TRANSFORM_REQ;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;

	if (rc) {
		if (rc != -ENODATA) {
			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
			cifs_dbg(VFS, "Send error in read = %d\n", rc);
			trace_smb3_read_err(xid, req->PersistentFileId,
					    io_parms->tcon->tid, ses->Suid,
					    io_parms->offset, io_parms->length,
					    rc);
		}
		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
		cifs_small_buf_release(req);
		return rc == -ENODATA ? 0 : rc;
	} else
		trace_smb3_read_done(xid, req->PersistentFileId,
				     io_parms->tcon->tid, ses->Suid,
				     io_parms->offset, io_parms->length);

	cifs_small_buf_release(req);

	*nbytes = le32_to_cpu(rsp->DataLength);
	if ((*nbytes > CIFS_MAX_MSGSIZE) ||
	    (*nbytes > io_parms->length)) {
		cifs_dbg(FYI, "bad length %d for count %d\n",
			 *nbytes, io_parms->length);
		rc = -EIO;
		*nbytes = 0;
	}

	if (*buf) {
		memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	} else if (resp_buftype != CIFS_NO_BUFFER) {
		*buf = rsp_iov.iov_base;
		if (resp_buftype == CIFS_SMALL_BUFFER)
			*buf_type = CIFS_SMALL_BUFFER;
		else if (resp_buftype == CIFS_LARGE_BUFFER)
			*buf_type = CIFS_LARGE_BUFFER;
	}
	return rc;
}

/*
 * Check the mid_state and signature on received buffer (if any), and queue the
 * workqueue completion task.
 */
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
	struct cifs_writedata *wdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	unsigned int written;
	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
	unsigned int credits_received = 0;

	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
		wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
		if (wdata->result != 0)
			break;

		written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off high 16 bits when bytes written as returned
		 * by the server is greater than bytes requested by the
		 * client. OS/2 servers are known to set incorrect
		 * CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
		/* fall through */
	default:
		wdata->result = -EIO;
		break;
	}
#ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If this wdata has a memory registered (MR), it can be freed now.
	 * The number of MRs available is limited, so it is important to
	 * recover a used MR as soon as the I/O finishes. Holding an MR
	 * longer into the I/O path can lead to deadlock on retry, due to
	 * the lack of an MR to send the request.
	 */
	if (wdata->mr) {
		smbd_deregister_mr(wdata->mr);
		wdata->mr = NULL;
	}
#endif
	if (wdata->result) {
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
		trace_smb3_write_err(0 /* no xid */,
				     wdata->cfile->fid.persistent_fid,
				     tcon->tid, tcon->ses->Suid, wdata->offset,
				     wdata->bytes, wdata->result);
		if (wdata->result == -ENOSPC)
			printk_once(KERN_WARNING "Out of space writing to %s\n",
				    tcon->treeName);
	} else
		trace_smb3_write_done(0 /* no xid */,
				      wdata->cfile->fid.persistent_fid,
				      tcon->tid, tcon->ses->Suid,
				      wdata->offset, wdata->bytes);

	queue_work(cifsiod_wq, &wdata->work);
	DeleteMidQEntry(mid);
	add_credits(tcon->ses->server, credits_received, 0);
}

/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata,
		  void (*release)(struct kref *kref))
{
	int rc = -EACCES, flags = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_sync_hdr *shdr;
	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct kvec iov[1];
	struct smb_rqst rqst = { };
	unsigned int total_len;

	rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
	if (rc) {
		if (rc == -EAGAIN && wdata->credits) {
			/* credits were reset by reconnect */
			wdata->credits = 0;
			/* reduce in_flight value since we won't send the req */
			spin_lock(&server->req_lock);
			server->in_flight--;
			spin_unlock(&server->req_lock);
		}
		goto async_writev_out;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	shdr = (struct smb2_sync_hdr *)req;
	shdr->ProcessId = cpu_to_le32(wdata->cfile->pid);

	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Offset = cpu_to_le64(wdata->offset);
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer));
	req->RemainingBytes = 0;
#ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If we want to do a server RDMA read, fill in and append
	 * smbd_buffer_descriptor_v1 to the end of the write request
	 */
	if (server->rdma && !server->sign && wdata->bytes >=
		server->smbd_conn->rdma_readwrite_threshold) {
		struct smbd_buffer_descriptor_v1 *v1;
		bool need_invalidate = server->dialect == SMB30_PROT_ID;

		wdata->mr = smbd_register_mr(
				server->smbd_conn, wdata->pages,
				wdata->nr_pages, wdata->page_offset,
				wdata->tailsz, false, need_invalidate);
		if (!wdata->mr) {
			rc = -ENOBUFS;
			goto async_writev_out;
		}
		req->Length = 0;
		req->DataOffset = 0;
		if (wdata->nr_pages > 1)
			req->RemainingBytes =
				cpu_to_le32(
					(wdata->nr_pages - 1) * wdata->pagesz -
					wdata->page_offset + wdata->tailsz
				);
		else
			req->RemainingBytes = cpu_to_le32(wdata->tailsz);
		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
		if (need_invalidate)
			req->Channel = SMB2_CHANNEL_RDMA_V1;
		req->WriteChannelInfoOffset =
			cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
		req->WriteChannelInfoLength =
			cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
		v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
		v1->offset = cpu_to_le64(wdata->mr->mr->iova);
		v1->token = cpu_to_le32(wdata->mr->mr->rkey);
		v1->length = cpu_to_le32(wdata->mr->mr->length);
	}
#endif
	iov[0].iov_len = total_len - 1;
	iov[0].iov_base = (char *)req;

	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;
	rqst.rq_pages = wdata->pages;
	rqst.rq_offset = wdata->page_offset;
	rqst.rq_npages = wdata->nr_pages;
	rqst.rq_pagesz = wdata->pagesz;
	rqst.rq_tailsz = wdata->tailsz;
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (wdata->mr) {
		iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
		rqst.rq_npages = 0;
	}
#endif
	cifs_dbg(FYI, "async write at %llu %u bytes\n",
		 wdata->offset, wdata->bytes);

#ifdef CONFIG_CIFS_SMB_DIRECT
	/* For RDMA read, I/O size is in RemainingBytes not in Length */
	if (!wdata->mr)
		req->Length = cpu_to_le32(wdata->bytes);
#else
	req->Length = cpu_to_le32(wdata->bytes);
#endif

	if (wdata->credits) {
		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
						    SMB2_MAX_BUFFER_SIZE));
		shdr->CreditRequest =
			cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
		spin_lock(&server->req_lock);
		server->credits += wdata->credits -
						le16_to_cpu(shdr->CreditCharge);
		spin_unlock(&server->req_lock);
		wake_up(&server->request_q);
		wdata->credits = le16_to_cpu(shdr->CreditCharge);
		flags |= CIFS_HAS_CREDITS;
	}

	kref_get(&wdata->refcount);
	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
			     wdata, flags);

	if (rc) {
		trace_smb3_write_err(0 /* no xid */, req->PersistentFileId,
				     tcon->tid, tcon->ses->Suid, wdata->offset,
				     wdata->bytes, rc);
		kref_put(&wdata->refcount, release);
		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
	}

async_writev_out:
	cifs_small_buf_release(req);
	return rc;
}

/*
 * SMB2_write() is passed a pointer to a kvec array and its length in n_vec.
 * n_vec must be at least 1: the data to write starts at position 1 in the
 * iov array (iov[0] is reserved for the request header), and the total data
 * length is given by io_parms->length.
 */
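/*
 * For illustration only (hypothetical caller, not taken from this file):
 * a single-buffer write of "len" bytes could be issued roughly as
 *
 *	struct kvec iov[2];	// iov[0] is filled in by SMB2_write()
 *
 *	iov[1].iov_base = data;
 *	iov[1].iov_len  = len;
 *	io_parms.length = len;
 *	rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
 */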
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
	   unsigned int *nbytes, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_write_req *req = NULL;
	struct smb2_write_rsp *rsp = NULL;
	int resp_buftype;
	struct kvec rsp_iov;
	int flags = 0;
	unsigned int total_len;

	*nbytes = 0;

	if (n_vec < 1)
		return rc;

	rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;

	if (smb3_encryption_required(io_parms->tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);

	req->PersistentFileId = io_parms->persistent_fid;
	req->VolatileFileId = io_parms->volatile_fid;
	req->WriteChannelInfoOffset = 0;
	req->WriteChannelInfoLength = 0;
	req->Channel = 0;
	req->Length = cpu_to_le32(io_parms->length);
	req->Offset = cpu_to_le64(io_parms->offset);
	req->DataOffset = cpu_to_le16(
				offsetof(struct smb2_write_req, Buffer));
	req->RemainingBytes = 0;

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
			    &resp_buftype, flags, &rsp_iov);
	rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;

	if (rc) {
		trace_smb3_write_err(xid, req->PersistentFileId,
				     io_parms->tcon->tid,
				     io_parms->tcon->ses->Suid,
				     io_parms->offset, io_parms->length, rc);
		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cifs_dbg(VFS, "Send error in write = %d\n", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
		trace_smb3_write_done(xid, req->PersistentFileId,
				      io_parms->tcon->tid,
				      io_parms->tcon->ses->Suid,
				      io_parms->offset, *nbytes);
	}

	cifs_small_buf_release(req);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

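/*
 * Count the FILE_DIRECTORY_INFO entries in a query_directory response buffer,
 * bounds-checking each NextEntryOffset and FileNameLength so a malformed
 * reply cannot walk past end_of_buf; also returns a pointer to the last
 * entry via *lastentry.
 */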
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
	int len;
	unsigned int entrycount = 0;
	unsigned int next_offset = 0;
	char *entryptr;
	FILE_DIRECTORY_INFO *dir_info;

	if (bufstart == NULL)
		return 0;

	entryptr = bufstart;

	while (1) {
		if (entryptr + next_offset < entryptr ||
		    entryptr + next_offset > end_of_buf ||
		    entryptr + next_offset + size > end_of_buf) {
			cifs_dbg(VFS, "malformed search entry would overflow\n");
			break;
		}

		entryptr = entryptr + next_offset;
		dir_info = (FILE_DIRECTORY_INFO *)entryptr;

		len = le32_to_cpu(dir_info->FileNameLength);
		if (entryptr + len < entryptr ||
		    entryptr + len > end_of_buf ||
		    entryptr + len + size > end_of_buf) {
			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
				 end_of_buf);
			break;
		}

		*lastentry = entryptr;
		entrycount++;

		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
		if (!next_offset)
			break;
	}

	return entrycount;
}

/*
 * Readdir/FindFirst
 */
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid, int index,
		     struct cifs_search_info *srch_inf)
{
	struct smb_rqst rqst;
	struct smb2_query_directory_req *req;
	struct smb2_query_directory_rsp *rsp = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int rc = 0;
	int len;
	int resp_buftype = CIFS_NO_BUFFER;
	unsigned char *bufptr;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses = tcon->ses;
	__le16 asteriks = cpu_to_le16('*');
	char *end_of_smb;
	unsigned int output_size = CIFSMaxBufSize;
	size_t info_buf_size;
	int flags = 0;
	unsigned int total_len;

	if (ses && (ses->server))
		server = ses->server;
	else
		return -EIO;

	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	default:
		cifs_dbg(VFS, "info level %u isn't supported\n",
			 srch_inf->info_level);
		rc = -EINVAL;
		goto qdir_exit;
	}

	req->FileIndex = cpu_to_le32(index);
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;

	len = 0x2;
	bufptr = req->Buffer;
	memcpy(bufptr, &asteriks, len);

	req->FileNameOffset =
		cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
	req->FileNameLength = cpu_to_le16(len);
	/*
	 * BB could be 30 bytes or so longer if we used SMB2 specific
	 * buffer lengths, but this is safe and close enough.
	 */
	output_size = min_t(unsigned int, output_size, server->maxBuf);
	output_size = min_t(unsigned int, output_size, 2 << 15);
	req->OutputBufferLength = cpu_to_le32(output_size);

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	iov[1].iov_base = (char *)(req->Buffer);
	iov[1].iov_len = len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(req);
	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;

	if (rc) {
		if (rc == -ENODATA &&
		    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
			srch_inf->endOfSearch = true;
			rc = 0;
		} else
			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		goto qdir_exit;
	}

	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       info_buf_size);
	if (rc)
		goto qdir_exit;

	srch_inf->unicode = true;

	if (srch_inf->ntwrk_buf_start) {
		if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
	srch_inf->ntwrk_buf_start = (char *)rsp;
	srch_inf->srch_entries_start = srch_inf->last_entry =
		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
	end_of_smb = rsp_iov.iov_len + (char *)rsp;
	srch_inf->entries_in_buffer =
			num_entries(srch_inf->srch_entries_start, end_of_smb,
				    &srch_inf->last_entry, info_buf_size);
	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
		 srch_inf->srch_entries_start, srch_inf->last_entry);
	if (resp_buftype == CIFS_LARGE_BUFFER)
		srch_inf->smallBuf = false;
	else if (resp_buftype == CIFS_SMALL_BUFFER)
		srch_inf->smallBuf = true;
	else
		cifs_dbg(VFS, "illegal search buffer type\n");

	return rc;

qdir_exit:
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}

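/*
 * Common helper for all SMB2 SET_INFO requests: the first data buffer is
 * copied into the request itself, any additional buffers are sent as extra
 * iovecs, and BufferLength is adjusted to cover the total.
 */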
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
	      u8 info_type, u32 additional_info, unsigned int num,
	      void **data, unsigned int *size)
{
	struct smb_rqst rqst;
	struct smb2_set_info_req *req;
	struct smb2_set_info_rsp *rsp = NULL;
	struct kvec *iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	unsigned int i;
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	unsigned int total_len;

	if (!ses || !(ses->server))
		return -EIO;

	if (!num)
		return -EINVAL;

	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;

	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
	if (rc) {
		kfree(iov);
		return rc;
	}

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->sync_hdr.ProcessId = cpu_to_le32(pid);

	req->InfoType = info_type;
	req->FileInfoClass = info_class;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	req->AdditionalInformation = cpu_to_le32(additional_info);

	req->BufferOffset =
			cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
	req->BufferLength = cpu_to_le32(*size);

	memcpy(req->Buffer, *data, *size);
	total_len += *size;

	iov[0].iov_base = (char *)req;
	/* 1 for Buffer */
	iov[0].iov_len = total_len - 1;

	for (i = 1; i < num; i++) {
		le32_add_cpu(&req->BufferLength, size[i]);
		iov[i].iov_base = (char *)data[i];
		iov[i].iov_len = size[i];
	}

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = num;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
			    &rsp_iov);
	cifs_buf_release(req);
	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;

	if (rc != 0) {
		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
				ses->Suid, info_class, (__u32)info_type, rc);
	}

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}

int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_rename_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
			      /* 0 = fail if target already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_rename_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
		current->tgid, FILE_RENAME_INFORMATION, SMB2_O_INFO_FILE,
		0, 2, data, size);
	kfree(data);
	return rc;
}

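/*
 * "rmdir" here is implemented as a set-info call: mark the open directory
 * handle delete-pending so the server removes it when the last handle closes.
 */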
int
SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
	   u64 persistent_fid, u64 volatile_fid)
{
	__u8 delete_pending = 1;
	void *data;
	unsigned int size;

	data = &delete_pending;
	size = 1; /* sizeof __u8 */

	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
		current->tgid, FILE_DISPOSITION_INFORMATION, SMB2_O_INFO_FILE,
		0, 1, &data, &size);
}

int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
	struct smb2_file_link_info info;
	void **data;
	unsigned int size[2];
	int rc;
	int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));

	data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
			      /* 0 = fail if link already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does spec say?) */
	info.FileNameLength = cpu_to_le32(len);

	data[0] = &info;
	size[0] = sizeof(struct smb2_file_link_info);

	data[1] = target_file;
	size[1] = len + 2 /* null */;

	rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, FILE_LINK_INFORMATION, SMB2_O_INFO_FILE,
			0, 2, data, size);
	kfree(data);
	return rc;
}

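/*
 * Set the file size: FILE_ALLOCATION_INFORMATION when is_falloc is set,
 * otherwise FILE_END_OF_FILE_INFORMATION.
 */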
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	     u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);

	if (is_falloc)
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_ALLOCATION_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, &data, &size);
	else
		return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
			0, 1, &data, &size);
}

int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
	unsigned int size;

	size = sizeof(FILE_BASIC_INFO);
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
		current->tgid, FILE_BASIC_INFORMATION, SMB2_O_INFO_FILE,
		0, 1, (void **)&buf, &size);
}

int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
	     u64 persistent_fid, u64 volatile_fid,
	     struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
			1, (void **)&pnntsd, &pacllen);
}

int
SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    u64 persistent_fid, u64 volatile_fid,
	    struct smb2_file_full_ea_info *buf, int len)
{
	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
		current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
		0, 1, (void **)&buf, &len);
}

int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  __u8 oplock_level)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_oplock_break *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	cifs_dbg(FYI, "SMB2_oplock_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->VolatileFid = volatile_fid;
	req->PersistentFid = persistent_fid;
	req->OplockLevel = oplock_level;
	req->sync_hdr.CreditRequest = cpu_to_le16(1);

	flags |= CIFS_NO_RESP;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}

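/*
 * Convert an SMB2 FS_FULL_SIZE_INFORMATION reply into kstatfs fields; the
 * block size is bytes per sector times sectors per allocation unit.
 */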
void
smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
			     struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
	kst->f_bfree  = kst->f_bavail =
			  le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
	return;
}

static void
copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
			      struct kstatfs *kst)
{
	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
	kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
		kst->f_bavail = kst->f_bfree;
	else
		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
	if (response_data->TotalFileNodes != cpu_to_le64(-1))
		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
	if (response_data->FreeFileNodes != cpu_to_le64(-1))
		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);

	return;
}

static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
		   int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
	struct TCP_Server_Info *server;
	int rc;
	struct smb2_query_info_req *req;
	unsigned int total_len;

	cifs_dbg(FYI, "Query FSInfo level %d\n", level);

	if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
		return -EIO;

	server = tcon->ses->server;

	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	req->InfoType = SMB2_O_INFO_FILESYSTEM;
	req->FileInfoClass = level;
	req->PersistentFileId = persistent_fid;
	req->VolatileFileId = volatile_fid;
	/* 1 for pad */
	req->InputBufferOffset =
			cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
	req->OutputBufferLength = cpu_to_le32(
		outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);

	iov->iov_base = (char *)req;
	iov->iov_len = total_len;
	return 0;
}

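/*
 * SMB3.1.1 POSIX extensions variant of the statfs query: requests
 * FS_POSIX_INFORMATION and converts the reply with
 * copy_posix_fs_info_to_kstatfs().
 */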
int
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
		      u64 persistent_fid, u64 volatile_fid,
		      struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	FILE_SYSTEM_POSIX_INFO *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
				sizeof(FILE_SYSTEM_POSIX_INFO),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (FILE_SYSTEM_POSIX_INFO *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);

posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype;
	struct cifs_ses *ses = tcon->ses;
	struct smb2_fs_full_size_info *info = NULL;
	int flags = 0;

	rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
				sizeof(struct smb2_fs_full_size_info),
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);

qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
	      u64 persistent_fid, u64 volatile_fid, int level)
{
	struct smb_rqst rqst;
	struct smb2_query_info_rsp *rsp = NULL;
	struct kvec iov;
	struct kvec rsp_iov;
	int rc = 0;
	int resp_buftype, max_len, min_len;
	struct cifs_ses *ses = tcon->ses;
	unsigned int rsp_len, offset;
	int flags = 0;

	if (level == FS_DEVICE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
	} else if (level == FS_ATTRIBUTE_INFORMATION) {
		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
		min_len = MIN_FS_ATTR_INFO_SIZE;
	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
		max_len = sizeof(struct smb3_fs_ss_info);
		min_len = sizeof(struct smb3_fs_ss_info);
	} else if (level == FS_VOLUME_INFORMATION) {
		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
		min_len = sizeof(struct smb3_fs_vol_info);
	} else {
		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
		return -EINVAL;
	}

	rc = build_qfs_info_req(&iov, tcon, level, max_len,
				persistent_fid, volatile_fid);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = &iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
	cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;

	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
	offset = le16_to_cpu(rsp->OutputBufferOffset);
	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
		memcpy(&tcon->fsAttrInfo, offset
			+ (char *)rsp, min_t(unsigned int,
			rsp_len, max_len));
	else if (level == FS_DEVICE_INFORMATION)
		memcpy(&tcon->fsDevInfo, offset
			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
	else if (level == FS_SECTOR_SIZE_INFORMATION) {
		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
			(offset + (char *)rsp);
		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
		tcon->perf_sector_size =
			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
	} else if (level == FS_VOLUME_INFORMATION) {
		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
			(offset + (char *)rsp);
		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
		tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}

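/*
 * Send an SMB2 LOCK request carrying num_lock smb2_lock_element structures;
 * the element array is passed as a second iovec rather than being copied
 * into the request buffer.
 */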
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	   const __u32 num_lock, struct smb2_lock_element *buf)
{
	struct smb_rqst rqst;
	int rc = 0;
	struct smb2_lock_req *req = NULL;
	struct kvec iov[2];
	struct kvec rsp_iov;
	int resp_buf_type;
	unsigned int count;
	int flags = CIFS_NO_RESP;
	unsigned int total_len;

	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);

	rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->sync_hdr.ProcessId = cpu_to_le32(pid);
	req->LockCount = cpu_to_le16(num_lock);

	req->PersistentFileId = persist_fid;
	req->VolatileFileId = volatile_fid;

	count = num_lock * sizeof(struct smb2_lock_element);

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
	iov[1].iov_base = (char *)buf;
	iov[1].iov_len = count;

	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 2;

	rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
			    &rsp_iov);
	cifs_small_buf_release(req);
	if (rc) {
		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	return rc;
}

int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
	struct smb2_lock_element lock;

	lock.Offset = cpu_to_le64(offset);
	lock.Length = cpu_to_le64(length);
	lock.Flags = cpu_to_le32(lock_flags);
	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);

	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}

int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
		 __u8 *lease_key, const __le32 lease_state)
{
	struct smb_rqst rqst;
	int rc;
	struct smb2_lease_ack *req = NULL;
	struct cifs_ses *ses = tcon->ses;
	int flags = CIFS_OBREAK_OP;
	unsigned int total_len;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	cifs_dbg(FYI, "SMB2_lease_break\n");
	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
				 &total_len);
	if (rc)
		return rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	req->sync_hdr.CreditRequest = cpu_to_le16(1);
	req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
	req->LeaseState = lease_state;

	flags |= CIFS_NO_RESP;

	iov[0].iov_base = (char *)req;
	iov[0].iov_len = total_len;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;

	rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
	cifs_small_buf_release(req);

	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
	}

	return rc;
}