dquot.c 82 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Implementation of the diskquota system for the LINUX operating system. QUOTA
  4. * is implemented using the BSD system call interface as the means of
  5. * communication with the user level. This file contains the generic routines
  6. * called by the different filesystems on allocation of an inode or block.
  7. * These routines take care of the administration needed to have a consistent
  8. * diskquota tracking system. The ideas of both user and group quotas are based
  9. * on the Melbourne quota system as used on BSD derived systems. The internal
  10. * implementation is based on one of the several variants of the LINUX
  11. * inode-subsystem with added complexity of the diskquota system.
  12. *
  13. * Author: Marco van Wieringen <mvw@planets.elm.net>
  14. *
  15. * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
  16. *
  17. * Revised list management to avoid races
  18. * -- Bill Hawes, <whawes@star.net>, 9/98
  19. *
  20. * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
  21. * As the consequence the locking was moved from dquot_decr_...(),
  22. * dquot_incr_...() to calling functions.
  23. * invalidate_dquots() now writes modified dquots.
  24. * Serialized quota_off() and quota_on() for mount point.
  25. * Fixed a few bugs in grow_dquots().
  26. * Fixed deadlock in write_dquot() - we no longer account quotas on
  27. * quota files
  28. * remove_dquot_ref() moved to inode.c - it now traverses through inodes
  29. * add_dquot_ref() restarts after blocking
  30. * Added check for bogus uid and fixed check for group in quotactl.
  31. * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
  32. *
  33. * Used struct list_head instead of own list struct
  34. * Invalidation of referenced dquots is no longer possible
  35. * Improved free_dquots list management
  36. * Quota and i_blocks are now updated in one place to avoid races
  37. * Warnings are now delayed so we won't block in critical section
  38. * Write updated not to require dquot lock
  39. * Jan Kara, <jack@suse.cz>, 9/2000
  40. *
  41. * Added dynamic quota structure allocation
  42. * Jan Kara <jack@suse.cz> 12/2000
  43. *
  44. * Rewritten quota interface. Implemented new quota format and
  45. * formats registering.
  46. * Jan Kara, <jack@suse.cz>, 2001,2002
  47. *
  48. * New SMP locking.
  49. * Jan Kara, <jack@suse.cz>, 10/2002
  50. *
  51. * Added journalled quota support, fix lock inversion problems
  52. * Jan Kara, <jack@suse.cz>, 2003,2004
  53. *
  54. * (C) Copyright 1994 - 1997 Marco van Wieringen
  55. */
  56. #include <linux/errno.h>
  57. #include <linux/kernel.h>
  58. #include <linux/fs.h>
  59. #include <linux/mount.h>
  60. #include <linux/mm.h>
  61. #include <linux/time.h>
  62. #include <linux/types.h>
  63. #include <linux/string.h>
  64. #include <linux/fcntl.h>
  65. #include <linux/stat.h>
  66. #include <linux/tty.h>
  67. #include <linux/file.h>
  68. #include <linux/slab.h>
  69. #include <linux/sysctl.h>
  70. #include <linux/init.h>
  71. #include <linux/module.h>
  72. #include <linux/proc_fs.h>
  73. #include <linux/security.h>
  74. #include <linux/sched.h>
  75. #include <linux/cred.h>
  76. #include <linux/kmod.h>
  77. #include <linux/namei.h>
  78. #include <linux/capability.h>
  79. #include <linux/quotaops.h>
  80. #include <linux/blkdev.h>
  81. #include <linux/sched/mm.h>
  82. #include "../internal.h" /* ugh */
  83. #include <linux/uaccess.h>
  84. /*
  85. * There are five quota SMP locks:
  86. * * dq_list_lock protects all lists with quotas and quota formats.
  87. * * dquot->dq_dqb_lock protects data from dq_dqb
  88. * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
  89. * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
  90. * dquot_transfer() can stabilize amount it transfers
  91. * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
  92. * pointers in the inode
  93. * * dq_state_lock protects modifications of quota state (on quotaon and
  94. * quotaoff) and readers who care about latest values take it as well.
  95. *
  96. * The spinlock ordering is hence:
  97. * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
  98. * dq_list_lock > dq_state_lock
  99. *
  100. * Note that some things (eg. sb pointer, type, id) don't change during
  101. * the life of the dquot structure and so needn't to be protected by a lock
  102. *
  103. * Operation accessing dquots via inode pointers are protected by dquot_srcu.
  104. * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and
  105. * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
  106. * inode and before dropping dquot references to avoid use of dquots after
  107. * they are freed. dq_data_lock is used to serialize the pointer setting and
  108. * clearing operations.
  109. * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  110. * inode is a quota file). Functions adding pointers from inode to dquots have
  111. * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
  112. * have to do all pointer modifications before dropping dq_data_lock. This makes
  113. * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  114. * then drops all pointers to dquots from an inode.
  115. *
  116. * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
  117. * memory (or space for it is being allocated) on the first dqget(), when it is
  118. * being written out, and when it is being released on the last dqput(). The
  119. * allocation and release operations are serialized by the dq_lock and by
  120. * checking the use count in dquot_release().
  121. *
  122. * Lock ordering (including related VFS locks) is the following:
  123. * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
  124. */
  125. static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
  126. static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
  127. __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
  128. EXPORT_SYMBOL(dq_data_lock);
  129. DEFINE_STATIC_SRCU(dquot_srcu);
  130. static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
  131. void __quota_error(struct super_block *sb, const char *func,
  132. const char *fmt, ...)
  133. {
  134. if (printk_ratelimit()) {
  135. va_list args;
  136. struct va_format vaf;
  137. va_start(args, fmt);
  138. vaf.fmt = fmt;
  139. vaf.va = &args;
  140. printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
  141. sb->s_id, func, &vaf);
  142. va_end(args);
  143. }
  144. }
  145. EXPORT_SYMBOL(__quota_error);
  146. #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
  147. static char *quotatypes[] = INITQFNAMES;
  148. #endif
  149. static struct quota_format_type *quota_formats; /* List of registered formats */
  150. static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
  151. /* SLAB cache for dquot structures */
  152. static struct kmem_cache *dquot_cachep;
  153. /* Workqueue on which quota_release_work is queued */
  154. static struct workqueue_struct *quota_unbound_wq;
/*
 * Register a quota format so quotaon can later find it by format id.
 * The format is pushed on the head of the global quota_formats list;
 * dq_list_lock protects the format list.
 */
void register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(register_quota_format);
  163. void unregister_quota_format(struct quota_format_type *fmt)
  164. {
  165. struct quota_format_type **actqf;
  166. spin_lock(&dq_list_lock);
  167. for (actqf = &quota_formats; *actqf && *actqf != fmt;
  168. actqf = &(*actqf)->qf_next)
  169. ;
  170. if (*actqf)
  171. *actqf = (*actqf)->qf_next;
  172. spin_unlock(&dq_list_lock);
  173. }
  174. EXPORT_SYMBOL(unregister_quota_format);
/*
 * Look up a registered quota format by format id, taking a module
 * reference on its owner. If the format is not present (or its module
 * is going away), the id is mapped to a module name via module_names[]
 * and request_module() is tried once before searching again.
 * Returns the format, or NULL on failure; put_quota_format() drops the
 * module reference.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		/* Must drop the lock: request_module() sleeps */
		spin_unlock(&dq_list_lock);
		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Retry the search now that the module may be loaded */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
  205. /*
  206. * Dquot List Management:
  207. * The quota code uses five lists for dquot management: the inuse_list,
  208. * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array.
  209. * A single dquot structure may be on some of those lists, depending on
  210. * its current state.
  211. *
  212. * All dquots are placed to the end of inuse_list when first created, and this
  213. * list is used for invalidate operation, which must look at every dquot.
  214. *
  215. * When the last reference of a dquot is dropped, the dquot is added to
  216. * releasing_dquots. We'll then queue work item which will call
  217. * synchronize_srcu() and after that perform the final cleanup of all the
  218. * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
  219. * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
  220. * struct.
  221. *
  222. * Unused and cleaned up dquots are in the free_dquots list and this list is
  223. * searched whenever we need an available dquot. Dquots are removed from the
  224. * list as soon as they are used again and dqstats.free_dquots gives the number
  225. * of dquots on the list. When dquot is invalidated it's completely released
  226. * from memory.
  227. *
  228. * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
  229. * dirtied, and this list is searched when writing dirty dquots back to
  230. * quota file. Note that some filesystems do dirty dquot tracking on their
  231. * own (e.g. in a journal) and thus don't use dqi_dirty_list.
  232. *
  233. * Dquots with a specific identity (device, type and id) are placed on
  234. * one of the dquot_hash[] hash chains. This provides an efficient search
  235. * mechanism to locate a specific dquot.
  236. */
  237. static LIST_HEAD(inuse_list);
  238. static LIST_HEAD(free_dquots);
  239. static LIST_HEAD(releasing_dquots);
  240. static unsigned int dq_hash_bits, dq_hash_mask;
  241. static struct hlist_head *dquot_hash;
  242. struct dqstats dqstats;
  243. EXPORT_SYMBOL(dqstats);
  244. static qsize_t inode_get_rsv_space(struct inode *inode);
  245. static qsize_t __inode_get_rsv_space(struct inode *inode);
  246. static int __dquot_initialize(struct inode *inode, int type);
  247. static void quota_release_workfn(struct work_struct *work);
  248. static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn);
  249. static inline unsigned int
  250. hashfn(const struct super_block *sb, struct kqid qid)
  251. {
  252. unsigned int id = from_kqid(&init_user_ns, qid);
  253. int type = qid.type;
  254. unsigned long tmp;
  255. tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
  256. return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
  257. }
  258. /*
  259. * Following list functions expect dq_list_lock to be held
  260. */
  261. static inline void insert_dquot_hash(struct dquot *dquot)
  262. {
  263. struct hlist_head *head;
  264. head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
  265. hlist_add_head(&dquot->dq_hash, head);
  266. }
/* Unlink a dquot from its hash chain; caller holds dq_list_lock. */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
/*
 * Search hash chain 'hashent' for a dquot with identity (sb, qid).
 * Returns the dquot without taking a reference, or NULL if not found.
 * Caller must hold dq_list_lock.
 */
static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
				struct kqid qid)
{
	struct dquot *dquot;

	hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash)
		if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
			return dquot;
	return NULL;
}
/* Add a dquot to the tail of the free list; caller holds dq_list_lock. */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}
/*
 * Queue a dquot for deferred cleanup on releasing_dquots and set
 * DQ_RELEASING_B so remove_free_dquot() can tell which of the two
 * dq_free-based lists the dquot sits on. Caller holds dq_list_lock.
 */
static inline void put_releasing_dquots(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &releasing_dquots);
	set_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
/*
 * Take a dquot off whichever of free_dquots / releasing_dquots it is on
 * (both lists share the dq_free list_head). DQ_RELEASING_B tells them
 * apart: only the free list is counted in DQST_FREE_DQUOTS.
 * Caller holds dq_list_lock; a dquot on neither list is a no-op.
 */
static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
		dqstats_dec(DQST_FREE_DQUOTS);
	else
		clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
}
/* Add a dquot to the global in-use list; caller holds dq_list_lock. */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}
/* Remove a dquot from the in-use list; caller holds dq_list_lock. */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
  313. /*
  314. * End of list functions needing dq_list_lock
  315. */
/*
 * Wait until any read/instantiation of the dquot in progress finishes.
 * dq_lock is held while a dquot is being read into memory, so an empty
 * lock/unlock pair serves as the wait without keeping the lock.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
/* Is the dquot fully read in with its on-disk structure allocated? */
static inline int dquot_active(struct dquot *dquot)
{
	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
}
/* Is the dquot modified and awaiting writeback? */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
/* Dirty a dquot through the filesystem's ->mark_dirty operation. */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* Inactive dquots are never written back; dirtying is pointless */
	if (!dquot_active(dquot))
		return 0;

	/* Filesystems tracking dirty dquots themselves need only the bit */
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (dquot_dirty(dquot))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
	int ret, err, cnt;
	struct dquot *dquot;

	ret = err = 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* Caller must be inside a dquot_srcu read-side section */
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot)
			/* Even in case of error we have to continue */
			ret = mark_dquot_dirty(dquot);
		/* Remember the first error but keep dirtying the rest */
		if (!err && ret < 0)
			err = ret;
	}
	return err;
}
  370. static inline void dqput_all(struct dquot **dquot)
  371. {
  372. unsigned int cnt;
  373. for (cnt = 0; cnt < MAXQUOTAS; cnt++)
  374. dqput(dquot[cnt]);
  375. }
/*
 * Clear the dquot's dirty state and unlink it from its dqi_dirty_list
 * (unless the filesystem uses DQUOT_NOLIST_DIRTY tracking, where only
 * the bit exists). Returns the previous dirty state: 1 if the dquot
 * was dirty, 0 otherwise.
 */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}
/*
 * Mark the in-memory quota info for the given type dirty so it gets
 * written back to the quota file. dq_data_lock protects mem_dqinfo.
 */
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 * Read dquot from disk and alloc space for it
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Quota I/O must not recurse into filesystem reclaim */
	memalloc = memalloc_nofs_save();
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
		if (ret < 0)
			goto out_iolock;
	}
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!dquot_active(dquot) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
/*
 * Write dquot to disk
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Nothing to write if the dquot was not dirty */
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (dquot_active(dquot))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
/*
 * Release dquot
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	unsigned int memalloc;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		/* Prefer the release error; fall back to the info error */
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	memalloc_nofs_restore(memalloc);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
/* Default ->destroy_dquot operation: free the dquot to the slab cache. */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
/* Destroy a dquot via the filesystem's ->destroy_dquot operation. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	/* Finish pending deferred releases so DQ_RELEASING_B dquots drain */
	flush_delayed_work(&quota_release_work);

	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			atomic_inc(&dquot->dq_count);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * The last user already dropped its reference but dquot didn't
		 * get fully cleaned up yet. Restart the scan which flushes the
		 * work cleaning up released dquots.
		 */
		if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
			spin_unlock(&dq_list_lock);
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/* Call callback for every active dquot on given filesystem */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!dquot_active(dquot))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		/* Drop the previous iteration's reference outside the lock */
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (dquot_active(dquot)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
  601. static inline int dquot_write_dquot(struct dquot *dquot)
  602. {
  603. int ret = dquot->dq_sb->dq_op->write_dquot(dquot);
  604. if (ret < 0) {
  605. quota_error(dquot->dq_sb, "Can't write quota structure "
  606. "(error %d). Quota may get out of sync!", ret);
  607. /* Clear dirty bit anyway to avoid infinite loop. */
  608. clear_dquot_dirty(dquot);
  609. }
  610. return ret;
  611. }
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	/* Let dquots queued for release be handled first */
	flush_delayed_work(&quota_release_work);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* type == -1 means "all quota types" */
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		/* Move list away to avoid livelock. */
		list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
		while (!list_empty(&dirty)) {
			dquot = list_first_entry(&dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!dquot_active(dquot));
			/* If the dquot is releasing we should not touch it */
			if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
				spin_unlock(&dq_list_lock);
				flush_delayed_work(&quota_release_work);
				spin_lock(&dq_list_lock);
				continue;
			}

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			err = dquot_write_dquot(dquot);
			/* Remember the first error only */
			if (err && !ret)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Also write per-type quota file info if dirty */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
/* Write all dquot structures to disk and make them visible from userspace */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	/* For filesystems keeping quota in hidden system files we are done */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	ret = sync_blockdev(sb->s_bdev);
	if (ret)
		return ret;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
  700. static unsigned long
  701. dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
  702. {
  703. struct dquot *dquot;
  704. unsigned long freed = 0;
  705. spin_lock(&dq_list_lock);
  706. while (!list_empty(&free_dquots) && sc->nr_to_scan) {
  707. dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
  708. remove_dquot_hash(dquot);
  709. remove_free_dquot(dquot);
  710. remove_inuse(dquot);
  711. do_destroy_dquot(dquot);
  712. sc->nr_to_scan--;
  713. freed++;
  714. }
  715. spin_unlock(&dq_list_lock);
  716. return freed;
  717. }
/*
 * Shrinker count callback: report reclaim pressure based on the number
 * of dquots currently sitting on the free list.
 */
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
/*
 * Safely release dquot and put reference to dquot.
 */
static void quota_release_workfn(struct work_struct *work)
{
	struct dquot *dquot;
	struct list_head rls_head;

	spin_lock(&dq_list_lock);
	/* Exchange the list head to avoid livelock. */
	list_replace_init(&releasing_dquots, &rls_head);
	spin_unlock(&dq_list_lock);
	/* Wait until all SRCU readers that might still see the dquots finish */
	synchronize_srcu(&dquot_srcu);

restart:
	spin_lock(&dq_list_lock);
	while (!list_empty(&rls_head)) {
		dquot = list_first_entry(&rls_head, struct dquot, dq_free);
		/* No references should remain at this point */
		WARN_ON_ONCE(atomic_read(&dquot->dq_count));
		/*
		 * Note that DQ_RELEASING_B protects us from racing with
		 * invalidate_dquots() calls so we are safe to work with the
		 * dquot even after we drop dq_list_lock.
		 */
		if (dquot_dirty(dquot)) {
			spin_unlock(&dq_list_lock);
			/* Commit dquot before releasing */
			dquot_write_dquot(dquot);
			goto restart;
		}
		if (dquot_active(dquot)) {
			spin_unlock(&dq_list_lock);
			dquot->dq_sb->dq_op->release_dquot(dquot);
			goto restart;
		}
		/* Dquot is inactive and clean, now move it to free list */
		remove_free_dquot(dquot);
		put_dquot_last(dquot);
	}
	spin_unlock(&dq_list_lock);
}
/*
 * Put reference to dquot
 */
void dqput(struct dquot *dquot)
{
	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	/* Dropping a reference nobody holds is a refcounting bug */
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);

	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			/* Wake the invalidate_dquots() waiter on dquot_ref_wq */
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}

	/* Need to release dquot? */
	WARN_ON_ONCE(!list_empty(&dquot->dq_free));
	put_releasing_dquots(dquot);
	atomic_dec(&dquot->dq_count);
	spin_unlock(&dq_list_lock);
	/* Actual release is deferred to the workqueue (it may block) */
	queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
/*
 * Default ->alloc_dquot implementation: zeroed allocation from the dquot
 * slab cache. GFP_NOFS because this can be called from fs writeout paths.
 */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
  803. static struct dquot *get_empty_dquot(struct super_block *sb, int type)
  804. {
  805. struct dquot *dquot;
  806. dquot = sb->dq_op->alloc_dquot(sb, type);
  807. if(!dquot)
  808. return NULL;
  809. mutex_init(&dquot->dq_lock);
  810. INIT_LIST_HEAD(&dquot->dq_free);
  811. INIT_LIST_HEAD(&dquot->dq_inuse);
  812. INIT_HLIST_NODE(&dquot->dq_hash);
  813. INIT_LIST_HEAD(&dquot->dq_dirty);
  814. dquot->dq_sb = sb;
  815. dquot->dq_id = make_kqid_invalid(type);
  816. atomic_set(&dquot->dq_count, 1);
  817. spin_lock_init(&dquot->dq_dqb_lock);
  818. return dquot;
  819. }
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 * a) checking for quota flags under dq_list_lock and
 * b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, struct kqid qid)
{
	unsigned int hashent = hashfn(sb, qid);
	struct dquot *dquot, *empty = NULL;

	if (!qid_has_mapping(sb->s_user_ns, qid))
		return ERR_PTR(-EINVAL);

	if (!sb_has_quota_active(sb, qid.type))
		return ERR_PTR(-ESRCH);
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Recheck under locks - quotaoff() may have raced with us */
	if (!sb_has_quota_active(sb, qid.type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		dquot = ERR_PTR(-ESRCH);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, qid);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside dq_list_lock, then retry lookup */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, qid.type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = qid;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
	} else {
		/* Cache hit: take it off the free list if it was unused */
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_CACHE_HITS);
		dqstats_inc(DQST_LOOKUPS);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 0 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!dquot_active(dquot)) {
		int err;

		err = sb->dq_op->acquire_dquot(dquot);
		if (err < 0) {
			dqput(dquot);
			dquot = ERR_PTR(err);
			goto out;
		}
	}
	/*
	 * Make sure following reads see filled structure - paired with
	 * smp_mb__before_atomic() in dquot_acquire().
	 */
	smp_rmb();
	/* Has somebody invalidated entry under us? */
	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
out:
	/* Free the preallocated dquot we ended up not using */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
/* Return the inode's per-type dquot pointer array via the fs callback. */
static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
	return inode->i_sb->s_op->get_dquots(inode);
}
  902. static int dqinit_needed(struct inode *inode, int type)
  903. {
  904. struct dquot __rcu * const *dquots;
  905. int cnt;
  906. if (IS_NOQUOTA(inode))
  907. return 0;
  908. dquots = i_dquot(inode);
  909. if (type != -1)
  910. return !dquots[type];
  911. for (cnt = 0; cnt < MAXQUOTAS; cnt++)
  912. if (!dquots[cnt])
  913. return 1;
  914. return 0;
  915. }
/* This routine is guarded by s_umount semaphore */
static int add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif
	int err = 0;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/* Skip dying/new inodes, inodes nobody has open for write,
		 * and inodes whose quota pointers are already set up */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    !atomic_read(&inode->i_writecount) ||
		    !dqinit_needed(inode, type)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

#ifdef CONFIG_QUOTA_DEBUG
		if (unlikely(inode_get_rsv_space(inode) > 0))
			reserved = 1;
#endif
		iput(old_inode);
		err = __dquot_initialize(inode, type);
		if (err) {
			iput(inode);
			goto out;
		}

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can be
		 * holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		old_inode = inode;
		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(old_inode);
out:
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		quota_error(sb, "Writes happened before quota was turned on "
			"thus quota information is probably inconsistent. "
			"Please run quotacheck(8)");
	}
#endif
	return err;
}
/*
 * Clear the quota pointer of given @type from every inode on @sb and
 * drop the corresponding dquot references.
 */
static void remove_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode;
#ifdef CONFIG_QUOTA_DEBUG
	int reserved = 0;
#endif

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We have to scan also I_NEW inodes because they can already
		 * have quota pointer initialized. Luckily, we need to touch
		 * only quota pointers and these have separate locking
		 * (dq_data_lock).
		 */
		spin_lock(&dq_data_lock);
		if (!IS_NOQUOTA(inode)) {
			struct dquot __rcu **dquots = i_dquot(inode);
			struct dquot *dquot = srcu_dereference_check(
				dquots[type], &dquot_srcu,
				lockdep_is_held(&dq_data_lock));

#ifdef CONFIG_QUOTA_DEBUG
			if (unlikely(inode_get_rsv_space(inode) > 0))
				reserved = 1;
#endif
			rcu_assign_pointer(dquots[type], NULL);
			if (dquot)
				dqput(dquot);
		}
		spin_unlock(&dq_data_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
#ifdef CONFIG_QUOTA_DEBUG
	if (reserved) {
		printk(KERN_WARNING "VFS (%s): Writes happened after quota"
			" was disabled thus quota information is probably "
			"inconsistent. Please run quotacheck(8).\n", sb->s_id);
	}
#endif
}
/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	/* Filesystems without quota operations have nothing to drop */
	if (sb->dq_op)
		remove_dquot_ref(sb, type);
}
  1015. static inline
  1016. void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
  1017. {
  1018. if (dquot->dq_dqb.dqb_rsvspace >= number)
  1019. dquot->dq_dqb.dqb_rsvspace -= number;
  1020. else {
  1021. WARN_ON_ONCE(1);
  1022. dquot->dq_dqb.dqb_rsvspace = 0;
  1023. }
  1024. if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
  1025. dquot->dq_dqb.dqb_bsoftlimit)
  1026. dquot->dq_dqb.dqb_btime = (time64_t) 0;
  1027. clear_bit(DQ_BLKS_B, &dquot->dq_flags);
  1028. }
  1029. static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
  1030. {
  1031. if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
  1032. dquot->dq_dqb.dqb_curinodes >= number)
  1033. dquot->dq_dqb.dqb_curinodes -= number;
  1034. else
  1035. dquot->dq_dqb.dqb_curinodes = 0;
  1036. if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
  1037. dquot->dq_dqb.dqb_itime = (time64_t) 0;
  1038. clear_bit(DQ_INODES_B, &dquot->dq_flags);
  1039. }
  1040. static void dquot_decr_space(struct dquot *dquot, qsize_t number)
  1041. {
  1042. if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
  1043. dquot->dq_dqb.dqb_curspace >= number)
  1044. dquot->dq_dqb.dqb_curspace -= number;
  1045. else
  1046. dquot->dq_dqb.dqb_curspace = 0;
  1047. if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
  1048. dquot->dq_dqb.dqb_bsoftlimit)
  1049. dquot->dq_dqb.dqb_btime = (time64_t) 0;
  1050. clear_bit(DQ_BLKS_B, &dquot->dq_flags);
  1051. }
/*
 * Pending quota warning, recorded by prepare_warning() and delivered
 * later by flush_warnings() where sleeping (tty/netlink) is allowed.
 */
struct dquot_warn {
	struct super_block *w_sb;	/* filesystem the warning is for */
	struct kqid w_dq_id;		/* quota id that crossed a limit */
	short w_type;			/* QUOTA_NL_* warning type */
};
  1057. static int warning_issued(struct dquot *dquot, const int warntype)
  1058. {
  1059. int flag = (warntype == QUOTA_NL_BHARDWARN ||
  1060. warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
  1061. ((warntype == QUOTA_NL_IHARDWARN ||
  1062. warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
  1063. if (!flag)
  1064. return 0;
  1065. return test_and_set_bit(flag, &dquot->dq_flags);
  1066. }
#ifdef CONFIG_PRINT_QUOTA_WARNING
/* Whether quota warnings should be printed to the offender's tty. */
static int flag_print_warnings = 1;

/*
 * Should the current task get this warning on its tty? Only the
 * user/group that actually owns the exceeded quota is notified;
 * project quota warnings go to whoever triggered them.
 */
static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot_warn *warn)
{
	char *msg = NULL;
	struct tty_struct *tty;
	int warntype = warn->w_type;

	/* "Usage dropped below limit" events are not printed to the tty */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
		return;

	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, warn->w_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
	switch (warntype) {
	case QUOTA_NL_IHARDWARN:
		msg = " file limit reached.\r\n";
		break;
	case QUOTA_NL_ISOFTLONGWARN:
		msg = " file quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_ISOFTWARN:
		msg = " file quota exceeded.\r\n";
		break;
	case QUOTA_NL_BHARDWARN:
		msg = " block limit reached.\r\n";
		break;
	case QUOTA_NL_BSOFTLONGWARN:
		msg = " block quota exceeded too long.\r\n";
		break;
	case QUOTA_NL_BSOFTWARN:
		msg = " block quota exceeded.\r\n";
		break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
#endif
  1127. static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
  1128. int warntype)
  1129. {
  1130. if (warning_issued(dquot, warntype))
  1131. return;
  1132. warn->w_type = warntype;
  1133. warn->w_sb = dquot->dq_sb;
  1134. warn->w_dq_id = dquot->dq_id;
  1135. }
/*
 * Write warnings to the console and send warning messages over netlink.
 *
 * Note that this function can call into tty and networking code.
 */
static void flush_warnings(struct dquot_warn *warn)
{
	int i;

	/* warn[] has one slot per quota type */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (warn[i].w_type == QUOTA_NL_NOWARN)
			continue;
#ifdef CONFIG_PRINT_QUOTA_WARNING
		print_warning(&warn[i]);
#endif
		quota_send_warning(warn[i].w_dq_id,
				   warn[i].w_sb->s_dev, warn[i].w_type);
	}
}
/*
 * May the current task exceed hard limits? CAP_SYS_RESOURCE allows it,
 * except when the old quota format is used with root squashing enabled.
 */
static int ignore_hardlimit(struct dquot *dquot)
{
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	return capable(CAP_SYS_RESOURCE) &&
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & DQF_ROOT_SQUASH));
}
/*
 * Account @inodes more inodes to @dquot, enforcing inode limits and
 * preparing (not sending) a warning in @warn when a limit is crossed.
 * Returns 0 on success, -EDQUOT when the allocation must be refused.
 */
static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
			    struct dquot_warn *warn)
{
	qsize_t newinodes;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
	/* Fake dquots and disabled limits are never enforced */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto add;

	/* Hard limit exceeded? */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* Soft limit exceeded and grace time already expired? */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
	    !ignore_hardlimit(dquot)) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
		ret = -EDQUOT;
		goto out;
	}

	/* First crossing of the soft limit: warn and start grace time */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
		dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
	}
add:
	dquot->dq_dqb.dqb_curinodes = newinodes;

out:
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
/*
 * Account @space used bytes plus @rsv_space reserved bytes to @dquot,
 * enforcing block limits. DQUOT_SPACE_WARN in @flags enables warning
 * preparation; DQUOT_SPACE_NOFAIL forces success after warnings and
 * grace times have been updated. Returns 0 or -EDQUOT.
 */
static int dquot_add_space(struct dquot *dquot, qsize_t space,
			   qsize_t rsv_space, unsigned int flags,
			   struct dquot_warn *warn)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;
	int ret = 0;

	spin_lock(&dquot->dq_dqb_lock);
	/* Fake dquots and disabled limits are never enforced */
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		goto finish;

	/* Limits apply to used + reserved space combined */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space + rsv_space;

	/* Hard limit exceeded? */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* Soft limit exceeded and grace time already expired? */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
	    !ignore_hardlimit(dquot)) {
		if (flags & DQUOT_SPACE_WARN)
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
		ret = -EDQUOT;
		goto finish;
	}

	/* First crossing of the soft limit */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (flags & DQUOT_SPACE_WARN) {
			prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
			dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
		} else {
			/*
			 * We don't allow preallocation to exceed softlimit so exceeding will
			 * be always printed
			 */
			ret = -EDQUOT;
			goto finish;
		}
	}
finish:
	/*
	 * We have to be careful and go through warning generation & grace time
	 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
	 * only here...
	 */
	if (flags & DQUOT_SPACE_NOFAIL)
		ret = 0;
	if (!ret) {
		dquot->dq_dqb.dqb_rsvspace += rsv_space;
		dquot->dq_dqb.dqb_curspace += space;
	}
	spin_unlock(&dquot->dq_dqb_lock);
	return ret;
}
  1262. static int info_idq_free(struct dquot *dquot, qsize_t inodes)
  1263. {
  1264. qsize_t newinodes;
  1265. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
  1266. dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
  1267. !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
  1268. return QUOTA_NL_NOWARN;
  1269. newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
  1270. if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
  1271. return QUOTA_NL_ISOFTBELOW;
  1272. if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
  1273. newinodes < dquot->dq_dqb.dqb_ihardlimit)
  1274. return QUOTA_NL_IHARDBELOW;
  1275. return QUOTA_NL_NOWARN;
  1276. }
  1277. static int info_bdq_free(struct dquot *dquot, qsize_t space)
  1278. {
  1279. qsize_t tspace;
  1280. tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
  1281. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
  1282. tspace <= dquot->dq_dqb.dqb_bsoftlimit)
  1283. return QUOTA_NL_NOWARN;
  1284. if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
  1285. return QUOTA_NL_BSOFTBELOW;
  1286. if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
  1287. tspace - space < dquot->dq_dqb.dqb_bhardlimit)
  1288. return QUOTA_NL_BHARDBELOW;
  1289. return QUOTA_NL_NOWARN;
  1290. }
/*
 * Is quota accounting in effect for this inode? Nonzero when at least
 * one quota type is loaded and not suspended (the per-type "loaded"
 * bits with the "suspended" bits masked out) and the inode itself is
 * not flagged NOQUOTA.
 */
static int inode_quota_active(const struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (IS_NOQUOTA(inode))
		return 0;
	return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
}
/*
 * Initialize quota pointers in inode
 *
 * It is better to call this function outside of any transaction as it
 * might need a lot of space in journal for dquot structure allocation.
 */
static int __dquot_initialize(struct inode *inode, int type)
{
	int cnt, init_needed = 0;
	struct dquot __rcu **dquots;
	struct dquot *got[MAXQUOTAS] = {};
	struct super_block *sb = inode->i_sb;
	qsize_t rsv;
	int ret = 0;

	if (!inode_quota_active(inode))
		return 0;

	dquots = i_dquot(inode);

	/* First get references to structures we might need. */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		struct kqid qid;
		kprojid_t projid;
		int rc;
		struct dquot *dquot;

		/* type == -1 means "all quota types" */
		if (type != -1 && cnt != type)
			continue;
		/*
		 * The i_dquot should have been initialized in most cases,
		 * we check it without locking here to avoid unnecessary
		 * dqget()/dqput() calls.
		 */
		if (dquots[cnt])
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		init_needed = 1;

		switch (cnt) {
		case USRQUOTA:
			qid = make_kqid_uid(inode->i_uid);
			break;
		case GRPQUOTA:
			qid = make_kqid_gid(inode->i_gid);
			break;
		case PRJQUOTA:
			rc = inode->i_sb->dq_op->get_projid(inode, &projid);
			if (rc)
				continue;
			qid = make_kqid_projid(projid);
			break;
		}
		dquot = dqget(sb, qid);
		if (IS_ERR(dquot)) {
			/* We raced with somebody turning quotas off... */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		got[cnt] = dquot;
	}

	/* All required i_dquot has been initialized */
	if (!init_needed)
		return 0;

	spin_lock(&dq_data_lock);
	if (IS_NOQUOTA(inode))
		goto out_lock;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(sb, cnt))
			continue;
		/* We could race with quotaon or dqget() could have failed */
		if (!got[cnt])
			continue;
		if (!dquots[cnt]) {
			rcu_assign_pointer(dquots[cnt], got[cnt]);
			got[cnt] = NULL;
			/*
			 * Make quota reservation system happy if someone
			 * did a write before quota was turned on
			 */
			rsv = inode_get_rsv_space(inode);
			if (unlikely(rsv)) {
				struct dquot *dquot = srcu_dereference_check(
					dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));

				spin_lock(&inode->i_lock);
				/* Get reservation again under proper lock */
				rsv = __inode_get_rsv_space(inode);
				spin_lock(&dquot->dq_dqb_lock);
				dquot->dq_dqb.dqb_rsvspace += rsv;
				spin_unlock(&dquot->dq_dqb_lock);
				spin_unlock(&inode->i_lock);
			}
		}
	}
out_lock:
	spin_unlock(&dq_data_lock);
out_put:
	/* Drop unused references */
	dqput_all(got);

	return ret;
}
/* Initialize quota pointers of all quota types for @inode. */
int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
  1407. bool dquot_initialize_needed(struct inode *inode)
  1408. {
  1409. struct dquot __rcu **dquots;
  1410. int i;
  1411. if (!inode_quota_active(inode))
  1412. return false;
  1413. dquots = i_dquot(inode);
  1414. for (i = 0; i < MAXQUOTAS; i++)
  1415. if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
  1416. return true;
  1417. return false;
  1418. }
  1419. EXPORT_SYMBOL(dquot_initialize_needed);
/*
 * Release all quotas referenced by inode.
 *
 * This function only be called on inode free or converting
 * a file to quota file, no other users for the i_dquot in
 * both cases, so we needn't call synchronize_srcu() after
 * clearing i_dquot.
 */
static void __dquot_drop(struct inode *inode)
{
	int cnt;
	struct dquot __rcu **dquots = i_dquot(inode);
	struct dquot *put[MAXQUOTAS];

	spin_lock(&dq_data_lock);
	/* Detach all pointers under the lock, then drop references outside */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
					lockdep_is_held(&dq_data_lock));
		rcu_assign_pointer(dquots[cnt], NULL);
	}
	spin_unlock(&dq_data_lock);
	dqput_all(put);
}
/* Drop all quota references held by @inode, skipping NOQUOTA inodes. */
void dquot_drop(struct inode *inode)
{
	struct dquot __rcu * const *dquots;
	int cnt;

	if (IS_NOQUOTA(inode))
		return;

	/*
	 * Test before calling to rule out calls from proc and such
	 * where we are not allowed to block. Note that this is
	 * actually reliable test even without the lock - the caller
	 * must assure that nobody can come after the DQUOT_DROP and
	 * add quota pointers back anyway.
	 */
	dquots = i_dquot(inode);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			break;
	}

	if (cnt < MAXQUOTAS)
		__dquot_drop(inode);
}
EXPORT_SYMBOL(dquot_drop);
/*
 * inode_reserved_space is managed internally by quota, and protected by
 * i_lock similar to i_blocks+i_bytes.
 */
static qsize_t *inode_reserved_space(struct inode * inode)
{
	/* Filesystem must explicitly define it's own method in order to use
	 * quota reservation interface */
	BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
	return inode->i_sb->dq_op->get_reserved_space(inode);
}
/*
 * Read the inode's reserved-space counter. Caller must hold i_lock
 * (lockless variant of inode_get_rsv_space() below).
 */
static qsize_t __inode_get_rsv_space(struct inode *inode)
{
	/* Filesystems without reservation support always report zero. */
	if (!inode->i_sb->dq_op->get_reserved_space)
		return 0;
	return *inode_reserved_space(inode);
}
  1481. static qsize_t inode_get_rsv_space(struct inode *inode)
  1482. {
  1483. qsize_t ret;
  1484. if (!inode->i_sb->dq_op->get_reserved_space)
  1485. return 0;
  1486. spin_lock(&inode->i_lock);
  1487. ret = __inode_get_rsv_space(inode);
  1488. spin_unlock(&inode->i_lock);
  1489. return ret;
  1490. }
/*
 * This functions updates i_blocks+i_bytes fields and quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 */

/*
 * This operation can block, but only after everything is updated
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot __rcu **dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode)) {
		/*
		 * Quota is off: still account the bytes on the inode so
		 * i_blocks/i_bytes (or the reservation counter) stay right.
		 */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		if (reserve) {
			ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
		} else {
			ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
		}
		if (ret) {
			/* Back out changes we already did */
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				spin_lock(&dquot->dq_dqb_lock);
				if (reserve)
					dquot_free_reserved_space(dquot, number);
				else
					dquot_decr_space(dquot, number);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			spin_unlock(&inode->i_lock);
			goto out_flush_warn;
		}
	}
	/* All quota types accepted the charge — now update the inode. */
	if (reserve)
		*inode_reserved_space(inode) += number;
	else
		__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations are in-memory only; nothing to write to disk yet. */
	if (reserve)
		goto out_flush_warn;
	ret = mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings must be flushed outside all spinlocks (may block). */
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
  1565. EXPORT_SYMBOL(__dquot_alloc_space);
/*
 * This operation can block, but only after everything is updated
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;

	if (!inode_quota_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		/* Charge one inode; may fail with EDQUOT and fill warn[]. */
		ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
		if (ret) {
			for (cnt--; cnt >= 0; cnt--) {
				dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
				if (!dquot)
					continue;
				/* Back out changes we already did */
				spin_lock(&dquot->dq_dqb_lock);
				dquot_decr_inodes(dquot, 1);
				spin_unlock(&dquot->dq_dqb_lock);
			}
			goto warn_put_all;
		}
	}

warn_put_all:
	spin_unlock(&inode->i_lock);
	/* Only write dquots out if all charges succeeded. */
	if (ret == 0)
		ret = mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
/*
 * Convert in-memory reserved quotas to real consumed quotas
 */
void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/*
			 * Clamp to the actual reservation so counters never
			 * go negative; the WARN flags the accounting bug.
			 */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
				number = dquot->dq_dqb.dqb_rsvspace;
			dquot->dq_dqb.dqb_curspace += number;
			dquot->dq_dqb.dqb_rsvspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
 * Convert allocated space back to in-memory reserved quotas
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int cnt, index;

	if (!inode_quota_active(inode)) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	/* Move allocated quotas back to reservations (inverse of claim) */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (dquot) {
			spin_lock(&dquot->dq_dqb_lock);
			/*
			 * Clamp to the current usage so counters never go
			 * negative; the WARN flags the accounting bug.
			 */
			if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
				number = dquot->dq_dqb.dqb_curspace;
			dquot->dq_dqb.dqb_rsvspace += number;
			dquot->dq_dqb.dqb_curspace -= number;
			spin_unlock(&dquot->dq_dqb_lock);
		}
	}
	/* Update inode bytes */
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
/*
 * This operation can block, but only after everything is updated
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu **dquots;
	struct dquot *dquot;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!inode_quota_active(inode)) {
		/* Quota off: just keep the inode byte counters consistent. */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		/* Freeing may drop usage back below a limit — queue warning. */
		wtype = info_bdq_free(dquot, number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		if (reserve)
			dquot_free_reserved_space(dquot, number);
		else
			dquot_decr_space(dquot, number);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	if (reserve)
		*inode_reserved_space(inode) -= number;
	else
		__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);

	/* Reservations are in-memory only; no dquot writeback needed. */
	if (reserve)
		goto out_unlock;
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
/*
 * This operation can block, but only after everything is updated
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot __rcu * const *dquots;
	struct dquot *dquot;
	int index;

	if (!inode_quota_active(inode))
		return;

	dquots = i_dquot(inode);
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&inode->i_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
		if (!dquot)
			continue;
		spin_lock(&dquot->dq_dqb_lock);
		/* Releasing an inode may clear a softlimit state — warn. */
		wtype = info_idq_free(dquot, 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquot, wtype);
		dquot_decr_inodes(dquot, 1);
		spin_unlock(&dquot->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
/*
 * Transfer the number of inode and blocks from one diskquota to an other.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot __rcu **dquots;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, index, ret = 0, err;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	/* Filesystem may count one file as several inodes (e.g. ea blocks). */
	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	spin_lock(&inode->i_lock);
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&inode->i_lock);
		spin_unlock(&dq_data_lock);
		return 0;
	}
	/* Snapshot usage under i_lock; same amounts are moved both ways. */
	cur_space = __inode_get_bytes(inode);
	rsv_space = __inode_get_rsv_space(inode);
	dquots = i_dquot(inode);
	/*
	 * Build the transfer_from list, check limits, and update usage in
	 * the target structures.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
				&dquot_srcu, lockdep_is_held(&dq_data_lock));
		ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
				       &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
				      DQUOT_SPACE_WARN, &warn_to[cnt]);
		if (ret) {
			/* Undo the inode charge made just above. */
			spin_lock(&transfer_to[cnt]->dq_dqb_lock);
			dquot_decr_inodes(transfer_to[cnt], inode_usage);
			spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
			goto over_quota;
		}
	}

	/* Decrease usage for source structures and update quota pointers */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			spin_lock(&transfer_from[cnt]->dq_dqb_lock);
			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt],
					      cur_space + rsv_space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
			spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
		}
		rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);

	/*
	 * These arrays are local and we hold dquot references so we don't need
	 * the srcu protection but still take dquot_srcu to avoid warning in
	 * mark_all_dquot_dirty().
	 */
	index = srcu_read_lock(&dquot_srcu);
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
	if (err < 0)
		ret = err;
	err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
	if (err < 0)
		ret = err;
	srcu_read_unlock(&dquot_srcu, index);

	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return ret;
over_quota:
	/* Back out changes we already did */
	for (cnt--; cnt >= 0; cnt--) {
		if (!is_valid[cnt])
			continue;
		spin_lock(&transfer_to[cnt]->dq_dqb_lock);
		dquot_decr_inodes(transfer_to[cnt], inode_usage);
		dquot_decr_space(transfer_to[cnt], cur_space);
		dquot_free_reserved_space(transfer_to[cnt], rsv_space);
		spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 */
int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
		   struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!inode_quota_active(inode))
		return 0;

	if (i_uid_needs_update(idmap, iattr, inode)) {
		kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode),
					  iattr->ia_vfsuid);

		dquot = dqget(sb, make_kqid_uid(kuid));
		if (IS_ERR(dquot)) {
			/* -ESRCH (quota type not active) is not fatal. */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	if (i_gid_needs_update(idmap, iattr, inode)) {
		kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode),
					  iattr->ia_vfsgid);

		dquot = dqget(sb, make_kqid_gid(kgid));
		if (IS_ERR(dquot)) {
			/* -ESRCH (quota type not active) is not fatal. */
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	/*
	 * On success __dquot_transfer() replaced transfer_to[] with the old
	 * dquots to release; on failure our own references are still there.
	 */
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
  1956. /*
  1957. * Write info of quota file to disk
  1958. */
  1959. int dquot_commit_info(struct super_block *sb, int type)
  1960. {
  1961. struct quota_info *dqopt = sb_dqopt(sb);
  1962. return dqopt->ops[type]->write_file_info(sb, type);
  1963. }
  1964. EXPORT_SYMBOL(dquot_commit_info);
  1965. int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
  1966. {
  1967. struct quota_info *dqopt = sb_dqopt(sb);
  1968. if (!sb_has_quota_active(sb, qid->type))
  1969. return -ESRCH;
  1970. if (!dqopt->ops[qid->type]->get_next_id)
  1971. return -ENOSYS;
  1972. return dqopt->ops[qid->type]->get_next_id(sb, qid);
  1973. }
  1974. EXPORT_SYMBOL(dquot_get_next_id);
/*
 * Definitions of diskquota operations.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
  1989. /*
  1990. * Generic helper for ->open on filesystems supporting disk quotas.
  1991. */
  1992. int dquot_file_open(struct inode *inode, struct file *file)
  1993. {
  1994. int error;
  1995. error = generic_file_open(inode, file);
  1996. if (!error && (file->f_mode & FMODE_WRITE))
  1997. error = dquot_initialize(inode);
  1998. return error;
  1999. }
  2000. EXPORT_SYMBOL(dquot_file_open);
  2001. static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
  2002. {
  2003. struct quota_info *dqopt = sb_dqopt(sb);
  2004. struct inode *inode = dqopt->files[type];
  2005. if (!inode)
  2006. return;
  2007. if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
  2008. inode_lock(inode);
  2009. inode->i_flags &= ~S_NOQUOTA;
  2010. inode_unlock(inode);
  2011. }
  2012. dqopt->files[type] = NULL;
  2013. iput(inode);
  2014. }
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt;
	struct quota_info *dqopt = sb_dqopt(sb);

	rwsem_assert_held_write(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;
		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &=	~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				/* Suspended quota was never re-enabled — just
				 * drop the held inode reference. */
				vfs_cleanup_quota_inode(sb, cnt);
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
			inode_lock(dqopt->files[cnt]);
			truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
			inode_unlock(dqopt->files[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	/* We are done when suspending quotas */
	if (flags & DQUOT_SUSPENDED)
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (!sb_has_quota_loaded(sb, cnt))
			vfs_cleanup_quota_inode(sb, cnt);
	return 0;
}
EXPORT_SYMBOL(dquot_disable);
/*
 * Fully turn off quota (both accounting and enforcement) for @type.
 * Thin wrapper around dquot_disable().
 */
int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);
/*
 * Turn quotas on on a device
 */

/*
 * Validate @inode as a quota file for @type, grab a reference to it and
 * store it in the superblock's quota_info. For old-style (visible) quota
 * files also set S_NOQUOTA and detach any quota pointers from the inode.
 */
static int vfs_setup_quota_inode(struct inode *inode, int type)
{
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (is_bad_inode(inode))
		return -EUCLEAN;
	if (!S_ISREG(inode->i_mode))
		return -EACCES;
	if (IS_RDONLY(inode))
		return -EROFS;
	if (sb_has_quota_loaded(sb, type))
		return -EBUSY;

	/*
	 * Quota files should never be encrypted.  They should be thought of as
	 * filesystem metadata, not user data.  New-style internal quota files
	 * cannot be encrypted by users anyway, but old-style external quota
	 * files could potentially be incorrectly created in an encrypted
	 * directory, hence this explicit check.  Some reasons why encrypted
	 * quota files don't work include: (1) some filesystems that support
	 * encryption don't handle it in their quota_read and quota_write, and
	 * (2) cleaning up encrypted quota files at unmount would need special
	 * consideration, as quota files are cleaned up later than user files.
	 */
	if (IS_ENCRYPTED(inode))
		return -EINVAL;

	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		return -EIO;
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}
	return 0;
}
/*
 * Load quota for @type on @sb using the quota format @format_id and enable
 * it with state @flags (DQUOT_USAGE_ENABLED and/or DQUOT_LIMITS_ENABLED).
 * Caller must hold s_umount for writing; the quota inode must already be
 * set up (see dquot_load_quota_inode()).
 */
int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	lockdep_assert_held_write(&sb->s_umount);

	/* Just unsuspend quotas? */
	if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED))
		return -EINVAL;

	/* Takes a reference on the format; dropped in out_fmt on error. */
	fmt = find_quota_format(format_id);
	if (!fmt)
		return -ESRCH;
	if (!sb->dq_op || !sb->s_qcop ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_fmt;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_fmt;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	/* Attach dquots to all inodes currently having data; on failure
	 * roll back the whole enablement. */
	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type,
			      DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	return error;
out_fmt:
	put_quota_format(fmt);
	return error;
}
EXPORT_SYMBOL(dquot_load_quota_sb);
  2233. /*
  2234. * More powerful function for turning on quotas on given quota inode allowing
  2235. * setting of individual quota flags
  2236. */
  2237. int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
  2238. unsigned int flags)
  2239. {
  2240. int err;
  2241. err = vfs_setup_quota_inode(inode, type);
  2242. if (err < 0)
  2243. return err;
  2244. err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
  2245. if (err < 0)
  2246. vfs_cleanup_quota_inode(inode->i_sb, type);
  2247. return err;
  2248. }
  2249. EXPORT_SYMBOL(dquot_load_quota_inode);
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int ret = 0, cnt;
	unsigned int flags;

	rwsem_assert_held_write(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		/* Capture the pre-suspend enablement flags and clear the
		 * suspended state before reloading. */
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
					  flags);
		if (ret < 0)
			vfs_cleanup_quota_inode(sb, cnt);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
  2277. int dquot_quota_on(struct super_block *sb, int type, int format_id,
  2278. const struct path *path)
  2279. {
  2280. int error = security_quota_on(path->dentry);
  2281. if (error)
  2282. return error;
  2283. /* Quota file not on the same filesystem? */
  2284. if (path->dentry->d_sb != sb)
  2285. error = -EXDEV;
  2286. else
  2287. error = dquot_load_quota_inode(d_inode(path->dentry), type,
  2288. format_id, DQUOT_USAGE_ENABLED |
  2289. DQUOT_LIMITS_ENABLED);
  2290. return error;
  2291. }
  2292. EXPORT_SYMBOL(dquot_quota_on);
  2293. /*
  2294. * This function is used when filesystem needs to initialize quotas
  2295. * during mount time.
  2296. */
  2297. int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
  2298. int format_id, int type)
  2299. {
  2300. struct dentry *dentry;
  2301. int error;
  2302. dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name));
  2303. if (IS_ERR(dentry))
  2304. return PTR_ERR(dentry);
  2305. error = security_quota_on(dentry);
  2306. if (!error)
  2307. error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
  2308. DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
  2309. dput(dentry);
  2310. return error;
  2311. }
  2312. EXPORT_SYMBOL(dquot_quota_on_mount);
/*
 * Enable limit enforcement for the quota types selected in @flags
 * (FS_QUOTA_*_ENFD bits). Only supported for filesystems with hidden
 * (system-file) quota; accounting must already be on.
 */
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type)) {
			ret = -EINVAL;
			goto out_err;
		}
		if (sb_has_quota_limits_enabled(sb, type)) {
			/* compatible with XFS */
			ret = -EEXIST;
			goto out_err;
		}
		spin_lock(&dq_state_lock);
		dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
		spin_unlock(&dq_state_lock);
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--)  {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	return ret;
}
/*
 * Disable quota limit enforcement (XFS-style Q_QUOTAOFF) for the quota
 * types selected in @flags.  Turning off *accounting* via quotactl is
 * deliberately refused: filesystems using hidden system quota files do not
 * expect userspace to be able to do that.
 *
 * Returns 0 on success, -ENOSYS if the fs doesn't use system quota files,
 * -EOPNOTSUPP if accounting-off was requested, -EEXIST if no requested
 * type actually had enforcement enabled, or a dquot_disable() error.
 */
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
        int ret;
        int type;
        struct quota_info *dqopt = sb_dqopt(sb);

        if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
                return -ENOSYS;
        /*
         * We don't support turning off accounting via quotactl. In principle
         * quota infrastructure can do this but filesystems don't expect
         * userspace to be able to do it.
         */
        if (flags &
            (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
                return -EOPNOTSUPP;
        /* Filter out limits not enabled */
        for (type = 0; type < MAXQUOTAS; type++)
                if (!sb_has_quota_limits_enabled(sb, type))
                        flags &= ~qtype_enforce_flag(type);
        /* Nothing left? */
        if (!flags)
                return -EEXIST;
        for (type = 0; type < MAXQUOTAS; type++) {
                if (flags & qtype_enforce_flag(type)) {
                        ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
                        if (ret < 0)
                                goto out_err;
                }
        }
        return 0;
out_err:
        /* Backout enforcement disabling we already did */
        for (type--; type >= 0; type--) {
                if (flags & qtype_enforce_flag(type)) {
                        /* Re-set the enforcement bit under dq_state_lock. */
                        spin_lock(&dq_state_lock);
                        dqopt->flags |=
                                dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
                        spin_unlock(&dq_state_lock);
                }
        }
        return ret;
}
  2392. /* Generic routine for getting common part of quota structure */
  2393. static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
  2394. {
  2395. struct mem_dqblk *dm = &dquot->dq_dqb;
  2396. memset(di, 0, sizeof(*di));
  2397. spin_lock(&dquot->dq_dqb_lock);
  2398. di->d_spc_hardlimit = dm->dqb_bhardlimit;
  2399. di->d_spc_softlimit = dm->dqb_bsoftlimit;
  2400. di->d_ino_hardlimit = dm->dqb_ihardlimit;
  2401. di->d_ino_softlimit = dm->dqb_isoftlimit;
  2402. di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
  2403. di->d_ino_count = dm->dqb_curinodes;
  2404. di->d_spc_timer = dm->dqb_btime;
  2405. di->d_ino_timer = dm->dqb_itime;
  2406. spin_unlock(&dquot->dq_dqb_lock);
  2407. }
  2408. int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
  2409. struct qc_dqblk *di)
  2410. {
  2411. struct dquot *dquot;
  2412. dquot = dqget(sb, qid);
  2413. if (IS_ERR(dquot))
  2414. return PTR_ERR(dquot);
  2415. do_get_dqblk(dquot, di);
  2416. dqput(dquot);
  2417. return 0;
  2418. }
  2419. EXPORT_SYMBOL(dquot_get_dqblk);
  2420. int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
  2421. struct qc_dqblk *di)
  2422. {
  2423. struct dquot *dquot;
  2424. int err;
  2425. if (!sb->dq_op->get_next_id)
  2426. return -ENOSYS;
  2427. err = sb->dq_op->get_next_id(sb, qid);
  2428. if (err < 0)
  2429. return err;
  2430. dquot = dqget(sb, *qid);
  2431. if (IS_ERR(dquot))
  2432. return PTR_ERR(dquot);
  2433. do_get_dqblk(dquot, di);
  2434. dqput(dquot);
  2435. return 0;
  2436. }
  2437. EXPORT_SYMBOL(dquot_get_next_dqblk);
/* Fields of struct qc_dqblk that the VFS quota layer allows to be set. */
#define VFS_QC_MASK \
        (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
         QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
         QC_SPC_TIMER | QC_INO_TIMER)

/*
 * Generic routine for setting common part of quota structure.
 *
 * Apply the fields selected by di->d_fieldmask to @dquot's in-memory quota
 * block, recompute the space/inode grace timers and the DQ_FAKE_B state,
 * and mark the dquot dirty so the change reaches disk.
 *
 * Returns 0 on success, -EINVAL if the mask contains unsupported fields,
 * -ERANGE if a requested limit exceeds the format's maximum, or a
 * mark_dquot_dirty() error.
 */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
        struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
        int ret;

        if (di->d_fieldmask & ~VFS_QC_MASK)
                return -EINVAL;

        /* Reject limits the on-disk quota format cannot represent. */
        if (((di->d_fieldmask & QC_SPC_SOFT) &&
             di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_SPC_HARD) &&
             di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_INO_SOFT) &&
             (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
            ((di->d_fieldmask & QC_INO_HARD) &&
             (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
                return -ERANGE;

        spin_lock(&dquot->dq_dqb_lock);
        if (di->d_fieldmask & QC_SPACE) {
                /*
                 * d_space includes reserved space, so subtract it to get the
                 * new "really used" amount.
                 */
                dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_SPC_SOFT)
                dm->dqb_bsoftlimit = di->d_spc_softlimit;
        if (di->d_fieldmask & QC_SPC_HARD)
                dm->dqb_bhardlimit = di->d_spc_hardlimit;
        if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_COUNT) {
                dm->dqb_curinodes = di->d_ino_count;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_SOFT)
                dm->dqb_isoftlimit = di->d_ino_softlimit;
        if (di->d_fieldmask & QC_INO_HARD)
                dm->dqb_ihardlimit = di->d_ino_hardlimit;
        if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_SPC_TIMER) {
                dm->dqb_btime = di->d_spc_timer;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }

        if (di->d_fieldmask & QC_INO_TIMER) {
                dm->dqb_itime = di->d_ino_timer;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }

        /* Recompute the space grace timer if usage or limits changed. */
        if (check_blim) {
                if (!dm->dqb_bsoftlimit ||
                    dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) {
                        /* Back under (or no) soft limit: clear the timer. */
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
                } else if (!(di->d_fieldmask & QC_SPC_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
        }
        /* Same recomputation for the inode grace timer. */
        if (check_ilim) {
                if (!dm->dqb_isoftlimit ||
                    dm->dqb_curinodes <= dm->dqb_isoftlimit) {
                        dm->dqb_itime = 0;
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
                } else if (!(di->d_fieldmask & QC_INO_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
        }
        /* A dquot with no limits at all is only tracking usage ("fake"). */
        if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
            dm->dqb_isoftlimit)
                clear_bit(DQ_FAKE_B, &dquot->dq_flags);
        else
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dquot->dq_dqb_lock);
        ret = mark_dquot_dirty(dquot);
        if (ret < 0)
                return ret;
        return 0;
}
  2526. int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
  2527. struct qc_dqblk *di)
  2528. {
  2529. struct dquot *dquot;
  2530. int rc;
  2531. dquot = dqget(sb, qid);
  2532. if (IS_ERR(dquot)) {
  2533. rc = PTR_ERR(dquot);
  2534. goto out;
  2535. }
  2536. rc = do_set_dqblk(dquot, di);
  2537. dqput(dquot);
  2538. out:
  2539. return rc;
  2540. }
  2541. EXPORT_SYMBOL(dquot_set_dqblk);
/* Generic routine for getting common part of quota file information */
/*
 * Fill @state with the per-type quota status of @sb: enabled flags, grace
 * time limits, and (where a quota file inode is known) its inode number
 * and block usage.  Types without active quota are left zeroed.
 *
 * Always returns 0.
 */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
        struct mem_dqinfo *mi;
        struct qc_type_state *tstate;
        struct quota_info *dqopt = sb_dqopt(sb);
        int type;

        memset(state, 0, sizeof(*state));
        for (type = 0; type < MAXQUOTAS; type++) {
                if (!sb_has_quota_active(sb, type))
                        continue;
                tstate = state->s_state + type;
                mi = sb_dqopt(sb)->info + type;
                tstate->flags = QCI_ACCT_ENABLED;
                /* dq_data_lock protects mem_dqinfo flags and grace times. */
                spin_lock(&dq_data_lock);
                if (mi->dqi_flags & DQF_SYS_FILE)
                        tstate->flags |= QCI_SYSFILE;
                if (mi->dqi_flags & DQF_ROOT_SQUASH)
                        tstate->flags |= QCI_ROOT_SQUASH;
                if (sb_has_quota_limits_enabled(sb, type))
                        tstate->flags |= QCI_LIMITS_ENFORCED;
                tstate->spc_timelimit = mi->dqi_bgrace;
                tstate->ino_timelimit = mi->dqi_igrace;
                if (dqopt->files[type]) {
                        tstate->ino = dqopt->files[type]->i_ino;
                        tstate->blocks = dqopt->files[type]->i_blocks;
                }
                tstate->nextents = 1;   /* We don't know... */
                spin_unlock(&dq_data_lock);
        }
        return 0;
}
EXPORT_SYMBOL(dquot_get_state);
/* Generic routine for setting common part of quota file information */
/*
 * Update per-type quota info (grace times, root-squash flag) for @sb from
 * @ii, then write the info block to disk via the filesystem's
 * ->write_info hook.
 *
 * Returns 0 on success, -EINVAL for unsupported fields or for root-squash
 * on a non-vfsold format, -ESRCH if quota is not active for @type, or a
 * ->write_info error.
 */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
        struct mem_dqinfo *mi;

        /* Warning limits and the RT-space timer are not supported here. */
        if ((ii->i_fieldmask & QC_WARNS_MASK) ||
            (ii->i_fieldmask & QC_RT_SPC_TIMER))
                return -EINVAL;
        if (!sb_has_quota_active(sb, type))
                return -ESRCH;
        mi = sb_dqopt(sb)->info + type;
        if (ii->i_fieldmask & QC_FLAGS) {
                /* Root squash only makes sense for the old VFS format. */
                if ((ii->i_flags & QCI_ROOT_SQUASH &&
                     mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD))
                        return -EINVAL;
        }
        spin_lock(&dq_data_lock);
        if (ii->i_fieldmask & QC_SPC_TIMER)
                mi->dqi_bgrace = ii->i_spc_timelimit;
        if (ii->i_fieldmask & QC_INO_TIMER)
                mi->dqi_igrace = ii->i_ino_timelimit;
        if (ii->i_fieldmask & QC_FLAGS) {
                if (ii->i_flags & QCI_ROOT_SQUASH)
                        mi->dqi_flags |= DQF_ROOT_SQUASH;
                else
                        mi->dqi_flags &= ~DQF_ROOT_SQUASH;
        }
        spin_unlock(&dq_data_lock);
        mark_info_dirty(sb, type);
        /* Force write to disk */
        return sb->dq_op->write_info(sb, type);
}
EXPORT_SYMBOL(dquot_set_dqinfo);
/*
 * quotactl operations for filesystems keeping quota metadata in hidden
 * system files (accounting always on while mounted; only enforcement is
 * toggled via quota_enable/quota_disable).
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
        .quota_enable   = dquot_quota_enable,
        .quota_disable  = dquot_quota_disable,
        .quota_sync     = dquot_quota_sync,
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
        .get_nextdqblk  = dquot_get_next_dqblk,
        .set_dqblk      = dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
/*
 * sysctl handler for the fs/quota statistics entries: sum the per-CPU
 * counter for the stat this table entry points at, cache the result in the
 * global dqstats.stat[] slot, and let proc_doulongvec_minmax() present it.
 */
static int do_proc_dqstats(const struct ctl_table *table, int write,
                void *buffer, size_t *lenp, loff_t *ppos)
{
        /* Derive the stat index from the entry's offset into dqstats.stat. */
        unsigned int type = (unsigned long *)table->data - dqstats.stat;
        s64 value = percpu_counter_sum(&dqstats.counter[type]);

        /* Filter negative values for non-monotonic counters */
        if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
                          type == DQST_FREE_DQUOTS))
                value = 0;

        /* Update global table */
        dqstats.stat[type] = value;
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
/*
 * sysctl table exposing quota statistics under fs/quota.  All counters are
 * read-only and routed through do_proc_dqstats(); "warnings" (when built
 * with CONFIG_PRINT_QUOTA_WARNING) is a writable plain int toggle.
 */
static struct ctl_table fs_dqstats_table[] = {
        {
                .procname       = "lookups",
                .data           = &dqstats.stat[DQST_LOOKUPS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "drops",
                .data           = &dqstats.stat[DQST_DROPS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "reads",
                .data           = &dqstats.stat[DQST_READS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "writes",
                .data           = &dqstats.stat[DQST_WRITES],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "cache_hits",
                .data           = &dqstats.stat[DQST_CACHE_HITS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "allocated_dquots",
                .data           = &dqstats.stat[DQST_ALLOC_DQUOTS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "free_dquots",
                .data           = &dqstats.stat[DQST_FREE_DQUOTS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
        {
                .procname       = "syncs",
                .data           = &dqstats.stat[DQST_SYNCS],
                .maxlen         = sizeof(unsigned long),
                .mode           = 0444,
                .proc_handler   = do_proc_dqstats,
        },
#ifdef CONFIG_PRINT_QUOTA_WARNING
        {
                .procname       = "warnings",
                .data           = &flag_print_warnings,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
#endif
};
/*
 * Boot-time initialization of the quota subsystem: register the sysctl
 * table, create the dquot slab cache and hash table, set up per-CPU
 * statistics counters, the dquot-cache shrinker, and the unbound
 * workqueue used for quota events.  All failures here panic, since the
 * VFS cannot operate with a half-initialized quota layer.
 */
static int __init dquot_init(void)
{
        int i, ret;
        unsigned long nr_hash, order;
        struct shrinker *dqcache_shrinker;

        printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

        register_sysctl_init("fs/quota", fs_dqstats_table);

        /* SLAB_PANIC means kmem_cache_create() never returns NULL here. */
        dquot_cachep = kmem_cache_create("dquot",
                        sizeof(struct dquot), sizeof(unsigned long) * 4,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_PANIC),
                        NULL);

        order = 0;
        dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
        if (!dquot_hash)
                panic("Cannot create dquot hash table");

        ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL,
                                       _DQST_DQSTAT_LAST);
        if (ret)
                panic("Cannot create dquot stat counters");

        /* Find power-of-two hlist_heads which can fit into allocation */
        nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
        dq_hash_bits = ilog2(nr_hash);
        nr_hash = 1UL << dq_hash_bits;
        dq_hash_mask = nr_hash - 1;
        for (i = 0; i < nr_hash; i++)
                INIT_HLIST_HEAD(dquot_hash + i);

        pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
                " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

        dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
        if (!dqcache_shrinker)
                panic("Cannot allocate dquot shrinker");

        dqcache_shrinker->count_objects = dqcache_shrink_count;
        dqcache_shrinker->scan_objects = dqcache_shrink_scan;

        shrinker_register(dqcache_shrinker);

        quota_unbound_wq = alloc_workqueue("quota_events_unbound",
                WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
        if (!quota_unbound_wq)
                panic("Cannot create quota_unbound_wq\n");

        return 0;
}
fs_initcall(dquot_init);