| 1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004 |
- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * linux/fs/locks.c
- *
- * We implement four types of file locks: BSD locks, posix locks, open
- * file description locks, and leases. For details about BSD locks,
- * see the flock(2) man page; for details about the other three, see
- * fcntl(2).
- *
- *
- * Locking conflicts and dependencies:
- * If multiple threads attempt to lock the same byte (or flock the same file)
- * only one can be granted the lock, and other must wait their turn.
- * The first lock has been "applied" or "granted", the others are "waiting"
- * and are "blocked" by the "applied" lock..
- *
- * Waiting and applied locks are all kept in trees whose properties are:
- *
- * - the root of a tree may be an applied or waiting lock.
- * - every other node in the tree is a waiting lock that
- * conflicts with every ancestor of that node.
- *
- * Every such tree begins life as a waiting singleton which obviously
- * satisfies the above properties.
- *
- * The only ways we modify trees preserve these properties:
- *
- * 1. We may add a new leaf node, but only after first verifying that it
- * conflicts with all of its ancestors.
- * 2. We may remove the root of a tree, creating a new singleton
- * tree from the root and N new trees rooted in the immediate
- * children.
- * 3. If the root of a tree is not currently an applied lock, we may
- * apply it (if possible).
- * 4. We may upgrade the root of the tree (either extend its range,
- * or upgrade its entire range from read to write).
- *
- * When an applied lock is modified in a way that reduces or downgrades any
- * part of its range, we remove all its children (2 above). This particularly
- * happens when a lock is unlocked.
- *
- * For each of those child trees we "wake up" the thread which is
- * waiting for the lock so it can continue handling as follows: if the
- * root of the tree applies, we do so (3). If it doesn't, it must
- * conflict with some applied lock. We remove (wake up) all of its children
- * (2), and add it is a new leaf to the tree rooted in the applied
- * lock (1). We then repeat the process recursively with those
- * children.
- *
- */
- #include <linux/capability.h>
- #include <linux/file.h>
- #include <linux/fdtable.h>
- #include <linux/filelock.h>
- #include <linux/fs.h>
- #include <linux/init.h>
- #include <linux/security.h>
- #include <linux/slab.h>
- #include <linux/syscalls.h>
- #include <linux/time.h>
- #include <linux/rcupdate.h>
- #include <linux/pid_namespace.h>
- #include <linux/hashtable.h>
- #include <linux/percpu.h>
- #include <linux/sysctl.h>
- #define CREATE_TRACE_POINTS
- #include <trace/events/filelock.h>
- #include <linux/uaccess.h>
- static struct file_lock *file_lock(struct file_lock_core *flc)
- {
- return container_of(flc, struct file_lock, c);
- }
- static struct file_lease *file_lease(struct file_lock_core *flc)
- {
- return container_of(flc, struct file_lease, c);
- }
- static bool lease_breaking(struct file_lease *fl)
- {
- return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
- }
- static int target_leasetype(struct file_lease *fl)
- {
- if (fl->c.flc_flags & FL_UNLOCK_PENDING)
- return F_UNLCK;
- if (fl->c.flc_flags & FL_DOWNGRADE_PENDING)
- return F_RDLCK;
- return fl->c.flc_type;
- }
- static int leases_enable = 1;
- static int lease_break_time = 45;
- #ifdef CONFIG_SYSCTL
- static struct ctl_table locks_sysctls[] = {
- {
- .procname = "leases-enable",
- .data = &leases_enable,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- #ifdef CONFIG_MMU
- {
- .procname = "lease-break-time",
- .data = &lease_break_time,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- #endif /* CONFIG_MMU */
- };
- static int __init init_fs_locks_sysctls(void)
- {
- register_sysctl_init("fs", locks_sysctls);
- return 0;
- }
- early_initcall(init_fs_locks_sysctls);
- #endif /* CONFIG_SYSCTL */
- /*
- * The global file_lock_list is only used for displaying /proc/locks, so we
- * keep a list on each CPU, with each list protected by its own spinlock.
- * Global serialization is done using file_rwsem.
- *
- * Note that alterations to the list also require that the relevant flc_lock is
- * held.
- */
- struct file_lock_list_struct {
- spinlock_t lock;
- struct hlist_head hlist;
- };
- static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
- DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
- /*
- * The blocked_hash is used to find POSIX lock loops for deadlock detection.
- * It is protected by blocked_lock_lock.
- *
- * We hash locks by lockowner in order to optimize searching for the lock a
- * particular lockowner is waiting on.
- *
- * FIXME: make this value scale via some heuristic? We generally will want more
- * buckets when we have more lockowners holding locks, but that's a little
- * difficult to determine without knowing what the workload will look like.
- */
- #define BLOCKED_HASH_BITS 7
- static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
- /*
- * This lock protects the blocked_hash. Generally, if you're accessing it, you
- * want to be holding this lock.
- *
- * In addition, it also protects the fl->fl_blocked_requests list, and the
- * fl->fl_blocker pointer for file_lock structures that are acting as lock
- * requests (in contrast to those that are acting as records of acquired locks).
- *
- * Note that when we acquire this lock in order to change the above fields,
- * we often hold the flc_lock as well. In certain cases, when reading the fields
- * protected by this lock, we can skip acquiring it iff we already hold the
- * flc_lock.
- */
- static DEFINE_SPINLOCK(blocked_lock_lock);
- static struct kmem_cache *flctx_cache __ro_after_init;
- static struct kmem_cache *filelock_cache __ro_after_init;
- static struct kmem_cache *filelease_cache __ro_after_init;
- static struct file_lock_context *
- locks_get_lock_context(struct inode *inode, int type)
- {
- struct file_lock_context *ctx;
- /* paired with cmpxchg() below */
- ctx = locks_inode_context(inode);
- if (likely(ctx) || type == F_UNLCK)
- goto out;
- ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
- if (!ctx)
- goto out;
- spin_lock_init(&ctx->flc_lock);
- INIT_LIST_HEAD(&ctx->flc_flock);
- INIT_LIST_HEAD(&ctx->flc_posix);
- INIT_LIST_HEAD(&ctx->flc_lease);
- /*
- * Assign the pointer if it's not already assigned. If it is, then
- * free the context we just allocated.
- */
- if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
- kmem_cache_free(flctx_cache, ctx);
- ctx = locks_inode_context(inode);
- }
- out:
- trace_locks_get_lock_context(inode, type, ctx);
- return ctx;
- }
- static void
- locks_dump_ctx_list(struct list_head *list, char *list_type)
- {
- struct file_lock_core *flc;
- list_for_each_entry(flc, list, flc_list)
- pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
- list_type, flc->flc_owner, flc->flc_flags,
- flc->flc_type, flc->flc_pid);
- }
- static void
- locks_check_ctx_lists(struct inode *inode)
- {
- struct file_lock_context *ctx = inode->i_flctx;
- if (unlikely(!list_empty(&ctx->flc_flock) ||
- !list_empty(&ctx->flc_posix) ||
- !list_empty(&ctx->flc_lease))) {
- pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
- MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
- inode->i_ino);
- locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
- locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
- locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
- }
- }
- static void
- locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type)
- {
- struct file_lock_core *flc;
- struct inode *inode = file_inode(filp);
- list_for_each_entry(flc, list, flc_list)
- if (flc->flc_file == filp)
- pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
- " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
- list_type, MAJOR(inode->i_sb->s_dev),
- MINOR(inode->i_sb->s_dev), inode->i_ino,
- flc->flc_owner, flc->flc_flags,
- flc->flc_type, flc->flc_pid);
- }
- void
- locks_free_lock_context(struct inode *inode)
- {
- struct file_lock_context *ctx = locks_inode_context(inode);
- if (unlikely(ctx)) {
- locks_check_ctx_lists(inode);
- kmem_cache_free(flctx_cache, ctx);
- }
- }
- static void locks_init_lock_heads(struct file_lock_core *flc)
- {
- INIT_HLIST_NODE(&flc->flc_link);
- INIT_LIST_HEAD(&flc->flc_list);
- INIT_LIST_HEAD(&flc->flc_blocked_requests);
- INIT_LIST_HEAD(&flc->flc_blocked_member);
- init_waitqueue_head(&flc->flc_wait);
- }
- /* Allocate an empty lock structure. */
- struct file_lock *locks_alloc_lock(void)
- {
- struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
- if (fl)
- locks_init_lock_heads(&fl->c);
- return fl;
- }
- EXPORT_SYMBOL_GPL(locks_alloc_lock);
- /* Allocate an empty lock structure. */
- struct file_lease *locks_alloc_lease(void)
- {
- struct file_lease *fl = kmem_cache_zalloc(filelease_cache, GFP_KERNEL);
- if (fl)
- locks_init_lock_heads(&fl->c);
- return fl;
- }
- EXPORT_SYMBOL_GPL(locks_alloc_lease);
- void locks_release_private(struct file_lock *fl)
- {
- struct file_lock_core *flc = &fl->c;
- BUG_ON(waitqueue_active(&flc->flc_wait));
- BUG_ON(!list_empty(&flc->flc_list));
- BUG_ON(!list_empty(&flc->flc_blocked_requests));
- BUG_ON(!list_empty(&flc->flc_blocked_member));
- BUG_ON(!hlist_unhashed(&flc->flc_link));
- if (fl->fl_ops) {
- if (fl->fl_ops->fl_release_private)
- fl->fl_ops->fl_release_private(fl);
- fl->fl_ops = NULL;
- }
- if (fl->fl_lmops) {
- if (fl->fl_lmops->lm_put_owner) {
- fl->fl_lmops->lm_put_owner(flc->flc_owner);
- flc->flc_owner = NULL;
- }
- fl->fl_lmops = NULL;
- }
- }
- EXPORT_SYMBOL_GPL(locks_release_private);
- /**
- * locks_owner_has_blockers - Check for blocking lock requests
- * @flctx: file lock context
- * @owner: lock owner
- *
- * Return values:
- * %true: @owner has at least one blocker
- * %false: @owner has no blockers
- */
- bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner)
- {
- struct file_lock_core *flc;
- spin_lock(&flctx->flc_lock);
- list_for_each_entry(flc, &flctx->flc_posix, flc_list) {
- if (flc->flc_owner != owner)
- continue;
- if (!list_empty(&flc->flc_blocked_requests)) {
- spin_unlock(&flctx->flc_lock);
- return true;
- }
- }
- spin_unlock(&flctx->flc_lock);
- return false;
- }
- EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
- /* Free a lock which is not in use. */
- void locks_free_lock(struct file_lock *fl)
- {
- locks_release_private(fl);
- kmem_cache_free(filelock_cache, fl);
- }
- EXPORT_SYMBOL(locks_free_lock);
- /* Free a lease which is not in use. */
- void locks_free_lease(struct file_lease *fl)
- {
- kmem_cache_free(filelease_cache, fl);
- }
- EXPORT_SYMBOL(locks_free_lease);
- static void
- locks_dispose_list(struct list_head *dispose)
- {
- struct file_lock_core *flc;
- while (!list_empty(dispose)) {
- flc = list_first_entry(dispose, struct file_lock_core, flc_list);
- list_del_init(&flc->flc_list);
- if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
- locks_free_lease(file_lease(flc));
- else
- locks_free_lock(file_lock(flc));
- }
- }
- void locks_init_lock(struct file_lock *fl)
- {
- memset(fl, 0, sizeof(struct file_lock));
- locks_init_lock_heads(&fl->c);
- }
- EXPORT_SYMBOL(locks_init_lock);
- void locks_init_lease(struct file_lease *fl)
- {
- memset(fl, 0, sizeof(*fl));
- locks_init_lock_heads(&fl->c);
- }
- EXPORT_SYMBOL(locks_init_lease);
- /*
- * Initialize a new lock from an existing file_lock structure.
- */
- void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
- {
- new->c.flc_owner = fl->c.flc_owner;
- new->c.flc_pid = fl->c.flc_pid;
- new->c.flc_file = NULL;
- new->c.flc_flags = fl->c.flc_flags;
- new->c.flc_type = fl->c.flc_type;
- new->fl_start = fl->fl_start;
- new->fl_end = fl->fl_end;
- new->fl_lmops = fl->fl_lmops;
- new->fl_ops = NULL;
- if (fl->fl_lmops) {
- if (fl->fl_lmops->lm_get_owner)
- fl->fl_lmops->lm_get_owner(fl->c.flc_owner);
- }
- }
- EXPORT_SYMBOL(locks_copy_conflock);
- void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
- {
- /* "new" must be a freshly-initialized lock */
- WARN_ON_ONCE(new->fl_ops);
- locks_copy_conflock(new, fl);
- new->c.flc_file = fl->c.flc_file;
- new->fl_ops = fl->fl_ops;
- if (fl->fl_ops) {
- if (fl->fl_ops->fl_copy_lock)
- fl->fl_ops->fl_copy_lock(new, fl);
- }
- }
- EXPORT_SYMBOL(locks_copy_lock);
- static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
- {
- struct file_lock *f;
- /*
- * As ctx->flc_lock is held, new requests cannot be added to
- * ->flc_blocked_requests, so we don't need a lock to check if it
- * is empty.
- */
- if (list_empty(&fl->c.flc_blocked_requests))
- return;
- spin_lock(&blocked_lock_lock);
- list_splice_init(&fl->c.flc_blocked_requests,
- &new->c.flc_blocked_requests);
- list_for_each_entry(f, &new->c.flc_blocked_requests,
- c.flc_blocked_member)
- f->c.flc_blocker = &new->c;
- spin_unlock(&blocked_lock_lock);
- }
- static inline int flock_translate_cmd(int cmd) {
- switch (cmd) {
- case LOCK_SH:
- return F_RDLCK;
- case LOCK_EX:
- return F_WRLCK;
- case LOCK_UN:
- return F_UNLCK;
- }
- return -EINVAL;
- }
- /* Fill in a file_lock structure with an appropriate FLOCK lock. */
- static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
- {
- locks_init_lock(fl);
- fl->c.flc_file = filp;
- fl->c.flc_owner = filp;
- fl->c.flc_pid = current->tgid;
- fl->c.flc_flags = FL_FLOCK;
- fl->c.flc_type = type;
- fl->fl_end = OFFSET_MAX;
- }
- static int assign_type(struct file_lock_core *flc, int type)
- {
- switch (type) {
- case F_RDLCK:
- case F_WRLCK:
- case F_UNLCK:
- flc->flc_type = type;
- break;
- default:
- return -EINVAL;
- }
- return 0;
- }
- static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
- struct flock64 *l)
- {
- switch (l->l_whence) {
- case SEEK_SET:
- fl->fl_start = 0;
- break;
- case SEEK_CUR:
- fl->fl_start = filp->f_pos;
- break;
- case SEEK_END:
- fl->fl_start = i_size_read(file_inode(filp));
- break;
- default:
- return -EINVAL;
- }
- if (l->l_start > OFFSET_MAX - fl->fl_start)
- return -EOVERFLOW;
- fl->fl_start += l->l_start;
- if (fl->fl_start < 0)
- return -EINVAL;
- /* POSIX-1996 leaves the case l->l_len < 0 undefined;
- POSIX-2001 defines it. */
- if (l->l_len > 0) {
- if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
- return -EOVERFLOW;
- fl->fl_end = fl->fl_start + (l->l_len - 1);
- } else if (l->l_len < 0) {
- if (fl->fl_start + l->l_len < 0)
- return -EINVAL;
- fl->fl_end = fl->fl_start - 1;
- fl->fl_start += l->l_len;
- } else
- fl->fl_end = OFFSET_MAX;
- fl->c.flc_owner = current->files;
- fl->c.flc_pid = current->tgid;
- fl->c.flc_file = filp;
- fl->c.flc_flags = FL_POSIX;
- fl->fl_ops = NULL;
- fl->fl_lmops = NULL;
- return assign_type(&fl->c, l->l_type);
- }
- /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
- * style lock.
- */
- static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
- struct flock *l)
- {
- struct flock64 ll = {
- .l_type = l->l_type,
- .l_whence = l->l_whence,
- .l_start = l->l_start,
- .l_len = l->l_len,
- };
- return flock64_to_posix_lock(filp, fl, &ll);
- }
- /* default lease lock manager operations */
- static bool
- lease_break_callback(struct file_lease *fl)
- {
- kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
- return false;
- }
- static void
- lease_setup(struct file_lease *fl, void **priv)
- {
- struct file *filp = fl->c.flc_file;
- struct fasync_struct *fa = *priv;
- /*
- * fasync_insert_entry() returns the old entry if any. If there was no
- * old entry, then it used "priv" and inserted it into the fasync list.
- * Clear the pointer to indicate that it shouldn't be freed.
- */
- if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
- *priv = NULL;
- __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
- }
- static const struct lease_manager_operations lease_manager_ops = {
- .lm_break = lease_break_callback,
- .lm_change = lease_modify,
- .lm_setup = lease_setup,
- };
- /*
- * Initialize a lease, use the default lock manager operations
- */
- static int lease_init(struct file *filp, int type, struct file_lease *fl)
- {
- if (assign_type(&fl->c, type) != 0)
- return -EINVAL;
- fl->c.flc_owner = filp;
- fl->c.flc_pid = current->tgid;
- fl->c.flc_file = filp;
- fl->c.flc_flags = FL_LEASE;
- fl->fl_lmops = &lease_manager_ops;
- return 0;
- }
- /* Allocate a file_lock initialised to this type of lease */
- static struct file_lease *lease_alloc(struct file *filp, int type)
- {
- struct file_lease *fl = locks_alloc_lease();
- int error = -ENOMEM;
- if (fl == NULL)
- return ERR_PTR(error);
- error = lease_init(filp, type, fl);
- if (error) {
- locks_free_lease(fl);
- return ERR_PTR(error);
- }
- return fl;
- }
- /* Check if two locks overlap each other.
- */
- static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
- {
- return ((fl1->fl_end >= fl2->fl_start) &&
- (fl2->fl_end >= fl1->fl_start));
- }
- /*
- * Check whether two locks have the same owner.
- */
- static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2)
- {
- return fl1->flc_owner == fl2->flc_owner;
- }
- /* Must be called with the flc_lock held! */
- static void locks_insert_global_locks(struct file_lock_core *flc)
- {
- struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
- percpu_rwsem_assert_held(&file_rwsem);
- spin_lock(&fll->lock);
- flc->flc_link_cpu = smp_processor_id();
- hlist_add_head(&flc->flc_link, &fll->hlist);
- spin_unlock(&fll->lock);
- }
- /* Must be called with the flc_lock held! */
- static void locks_delete_global_locks(struct file_lock_core *flc)
- {
- struct file_lock_list_struct *fll;
- percpu_rwsem_assert_held(&file_rwsem);
- /*
- * Avoid taking lock if already unhashed. This is safe since this check
- * is done while holding the flc_lock, and new insertions into the list
- * also require that it be held.
- */
- if (hlist_unhashed(&flc->flc_link))
- return;
- fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu);
- spin_lock(&fll->lock);
- hlist_del_init(&flc->flc_link);
- spin_unlock(&fll->lock);
- }
- static unsigned long
- posix_owner_key(struct file_lock_core *flc)
- {
- return (unsigned long) flc->flc_owner;
- }
- static void locks_insert_global_blocked(struct file_lock_core *waiter)
- {
- lockdep_assert_held(&blocked_lock_lock);
- hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter));
- }
- static void locks_delete_global_blocked(struct file_lock_core *waiter)
- {
- lockdep_assert_held(&blocked_lock_lock);
- hash_del(&waiter->flc_link);
- }
- /* Remove waiter from blocker's block list.
- * When blocker ends up pointing to itself then the list is empty.
- *
- * Must be called with blocked_lock_lock held.
- */
- static void __locks_unlink_block(struct file_lock_core *waiter)
- {
- locks_delete_global_blocked(waiter);
- list_del_init(&waiter->flc_blocked_member);
- }
- static void __locks_wake_up_blocks(struct file_lock_core *blocker)
- {
- while (!list_empty(&blocker->flc_blocked_requests)) {
- struct file_lock_core *waiter;
- struct file_lock *fl;
- waiter = list_first_entry(&blocker->flc_blocked_requests,
- struct file_lock_core, flc_blocked_member);
- fl = file_lock(waiter);
- __locks_unlink_block(waiter);
- if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) &&
- fl->fl_lmops && fl->fl_lmops->lm_notify)
- fl->fl_lmops->lm_notify(fl);
- else
- locks_wake_up(fl);
- /*
- * The setting of flc_blocker to NULL marks the "done"
- * point in deleting a block. Paired with acquire at the top
- * of locks_delete_block().
- */
- smp_store_release(&waiter->flc_blocker, NULL);
- }
- }
- static int __locks_delete_block(struct file_lock_core *waiter)
- {
- int status = -ENOENT;
- /*
- * If fl_blocker is NULL, it won't be set again as this thread "owns"
- * the lock and is the only one that might try to claim the lock.
- *
- * We use acquire/release to manage fl_blocker so that we can
- * optimize away taking the blocked_lock_lock in many cases.
- *
- * The smp_load_acquire guarantees two things:
- *
- * 1/ that fl_blocked_requests can be tested locklessly. If something
- * was recently added to that list it must have been in a locked region
- * *before* the locked region when fl_blocker was set to NULL.
- *
- * 2/ that no other thread is accessing 'waiter', so it is safe to free
- * it. __locks_wake_up_blocks is careful not to touch waiter after
- * fl_blocker is released.
- *
- * If a lockless check of fl_blocker shows it to be NULL, we know that
- * no new locks can be inserted into its fl_blocked_requests list, and
- * can avoid doing anything further if the list is empty.
- */
- if (!smp_load_acquire(&waiter->flc_blocker) &&
- list_empty(&waiter->flc_blocked_requests))
- return status;
- spin_lock(&blocked_lock_lock);
- if (waiter->flc_blocker)
- status = 0;
- __locks_wake_up_blocks(waiter);
- __locks_unlink_block(waiter);
- /*
- * The setting of fl_blocker to NULL marks the "done" point in deleting
- * a block. Paired with acquire at the top of this function.
- */
- smp_store_release(&waiter->flc_blocker, NULL);
- spin_unlock(&blocked_lock_lock);
- return status;
- }
- /**
- * locks_delete_block - stop waiting for a file lock
- * @waiter: the lock which was waiting
- *
- * lockd/nfsd need to disconnect the lock while working on it.
- */
- int locks_delete_block(struct file_lock *waiter)
- {
- return __locks_delete_block(&waiter->c);
- }
- EXPORT_SYMBOL(locks_delete_block);
- /* Insert waiter into blocker's block list.
- * We use a circular list so that processes can be easily woken up in
- * the order they blocked. The documentation doesn't require this but
- * it seems like the reasonable thing to do.
- *
- * Must be called with both the flc_lock and blocked_lock_lock held. The
- * fl_blocked_requests list itself is protected by the blocked_lock_lock,
- * but by ensuring that the flc_lock is also held on insertions we can avoid
- * taking the blocked_lock_lock in some cases when we see that the
- * fl_blocked_requests list is empty.
- *
- * Rather than just adding to the list, we check for conflicts with any existing
- * waiters, and add beneath any waiter that blocks the new waiter.
- * Thus wakeups don't happen until needed.
- */
- static void __locks_insert_block(struct file_lock_core *blocker,
- struct file_lock_core *waiter,
- bool conflict(struct file_lock_core *,
- struct file_lock_core *))
- {
- struct file_lock_core *flc;
- BUG_ON(!list_empty(&waiter->flc_blocked_member));
- new_blocker:
- list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member)
- if (conflict(flc, waiter)) {
- blocker = flc;
- goto new_blocker;
- }
- waiter->flc_blocker = blocker;
- list_add_tail(&waiter->flc_blocked_member,
- &blocker->flc_blocked_requests);
- if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == FL_POSIX)
- locks_insert_global_blocked(waiter);
- /* The requests in waiter->flc_blocked are known to conflict with
- * waiter, but might not conflict with blocker, or the requests
- * and lock which block it. So they all need to be woken.
- */
- __locks_wake_up_blocks(waiter);
- }
- /* Must be called with flc_lock held. */
- static void locks_insert_block(struct file_lock_core *blocker,
- struct file_lock_core *waiter,
- bool conflict(struct file_lock_core *,
- struct file_lock_core *))
- {
- spin_lock(&blocked_lock_lock);
- __locks_insert_block(blocker, waiter, conflict);
- spin_unlock(&blocked_lock_lock);
- }
- /*
- * Wake up processes blocked waiting for blocker.
- *
- * Must be called with the inode->flc_lock held!
- */
- static void locks_wake_up_blocks(struct file_lock_core *blocker)
- {
- /*
- * Avoid taking global lock if list is empty. This is safe since new
- * blocked requests are only added to the list under the flc_lock, and
- * the flc_lock is always held here. Note that removal from the
- * fl_blocked_requests list does not require the flc_lock, so we must
- * recheck list_empty() after acquiring the blocked_lock_lock.
- */
- if (list_empty(&blocker->flc_blocked_requests))
- return;
- spin_lock(&blocked_lock_lock);
- __locks_wake_up_blocks(blocker);
- spin_unlock(&blocked_lock_lock);
- }
- static void
- locks_insert_lock_ctx(struct file_lock_core *fl, struct list_head *before)
- {
- list_add_tail(&fl->flc_list, before);
- locks_insert_global_locks(fl);
- }
- static void
- locks_unlink_lock_ctx(struct file_lock_core *fl)
- {
- locks_delete_global_locks(fl);
- list_del_init(&fl->flc_list);
- locks_wake_up_blocks(fl);
- }
- static void
- locks_delete_lock_ctx(struct file_lock_core *fl, struct list_head *dispose)
- {
- locks_unlink_lock_ctx(fl);
- if (dispose)
- list_add(&fl->flc_list, dispose);
- else
- locks_free_lock(file_lock(fl));
- }
- /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
- * checks for shared/exclusive status of overlapping locks.
- */
- static bool locks_conflict(struct file_lock_core *caller_flc,
- struct file_lock_core *sys_flc)
- {
- if (sys_flc->flc_type == F_WRLCK)
- return true;
- if (caller_flc->flc_type == F_WRLCK)
- return true;
- return false;
- }
- /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
- * checking before calling the locks_conflict().
- */
- static bool posix_locks_conflict(struct file_lock_core *caller_flc,
- struct file_lock_core *sys_flc)
- {
- struct file_lock *caller_fl = file_lock(caller_flc);
- struct file_lock *sys_fl = file_lock(sys_flc);
- /* POSIX locks owned by the same process do not conflict with
- * each other.
- */
- if (posix_same_owner(caller_flc, sys_flc))
- return false;
- /* Check whether they overlap */
- if (!locks_overlap(caller_fl, sys_fl))
- return false;
- return locks_conflict(caller_flc, sys_flc);
- }
- /* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK
- * path so checks for additional GETLK-specific things like F_UNLCK.
- */
- static bool posix_test_locks_conflict(struct file_lock *caller_fl,
- struct file_lock *sys_fl)
- {
- struct file_lock_core *caller = &caller_fl->c;
- struct file_lock_core *sys = &sys_fl->c;
- /* F_UNLCK checks any locks on the same fd. */
- if (lock_is_unlock(caller_fl)) {
- if (!posix_same_owner(caller, sys))
- return false;
- return locks_overlap(caller_fl, sys_fl);
- }
- return posix_locks_conflict(caller, sys);
- }
- /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
- * checking before calling the locks_conflict().
- */
- static bool flock_locks_conflict(struct file_lock_core *caller_flc,
- struct file_lock_core *sys_flc)
- {
- /* FLOCK locks referring to the same filp do not conflict with
- * each other.
- */
- if (caller_flc->flc_file == sys_flc->flc_file)
- return false;
- return locks_conflict(caller_flc, sys_flc);
- }
- void
- posix_test_lock(struct file *filp, struct file_lock *fl)
- {
- struct file_lock *cfl;
- struct file_lock_context *ctx;
- struct inode *inode = file_inode(filp);
- void *owner;
- void (*func)(void);
- ctx = locks_inode_context(inode);
- if (!ctx || list_empty_careful(&ctx->flc_posix)) {
- fl->c.flc_type = F_UNLCK;
- return;
- }
- retry:
- spin_lock(&ctx->flc_lock);
- list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) {
- if (!posix_test_locks_conflict(fl, cfl))
- continue;
- if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
- && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
- owner = cfl->fl_lmops->lm_mod_owner;
- func = cfl->fl_lmops->lm_expire_lock;
- __module_get(owner);
- spin_unlock(&ctx->flc_lock);
- (*func)();
- module_put(owner);
- goto retry;
- }
- locks_copy_conflock(fl, cfl);
- goto out;
- }
- fl->c.flc_type = F_UNLCK;
- out:
- spin_unlock(&ctx->flc_lock);
- return;
- }
- EXPORT_SYMBOL(posix_test_lock);
- /*
- * Deadlock detection:
- *
- * We attempt to detect deadlocks that are due purely to posix file
- * locks.
- *
- * We assume that a task can be waiting for at most one lock at a time.
- * So for any acquired lock, the process holding that lock may be
- * waiting on at most one other lock. That lock in turns may be held by
- * someone waiting for at most one other lock. Given a requested lock
- * caller_fl which is about to wait for a conflicting lock block_fl, we
- * follow this chain of waiters to ensure we are not about to create a
- * cycle.
- *
- * Since we do this before we ever put a process to sleep on a lock, we
- * are ensured that there is never a cycle; that is what guarantees that
- * the while() loop in posix_locks_deadlock() eventually completes.
- *
- * Note: the above assumption may not be true when handling lock
- * requests from a broken NFS client. It may also fail in the presence
- * of tasks (such as posix threads) sharing the same open file table.
- * To handle those cases, we just bail out after a few iterations.
- *
- * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
- * Because the owner is not even nominally tied to a thread of
- * execution, the deadlock detection below can't reasonably work well. Just
- * skip it for those.
- *
- * In principle, we could do a more limited deadlock detection on FL_OFDLCK
- * locks that just checks for the case where two tasks are attempting to
- * upgrade from read to write locks on the same inode.
- */
- #define MAX_DEADLK_ITERATIONS 10
- /* Find a lock that the owner of the given @blocker is blocking on. */
- static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker)
- {
- struct file_lock_core *flc;
- hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
- if (posix_same_owner(flc, blocker)) {
- while (flc->flc_blocker)
- flc = flc->flc_blocker;
- return flc;
- }
- }
- return NULL;
- }
- /* Must be called with the blocked_lock_lock held! */
- static bool posix_locks_deadlock(struct file_lock *caller_fl,
- struct file_lock *block_fl)
- {
- struct file_lock_core *caller = &caller_fl->c;
- struct file_lock_core *blocker = &block_fl->c;
- int i = 0;
- lockdep_assert_held(&blocked_lock_lock);
- /*
- * This deadlock detector can't reasonably detect deadlocks with
- * FL_OFDLCK locks, since they aren't owned by a process, per-se.
- */
- if (caller->flc_flags & FL_OFDLCK)
- return false;
- while ((blocker = what_owner_is_waiting_for(blocker))) {
- if (i++ > MAX_DEADLK_ITERATIONS)
- return false;
- if (posix_same_owner(caller, blocker))
- return true;
- }
- return false;
- }
- /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
- * after any leases, but before any posix locks.
- *
- * Note that if called with an FL_EXISTS argument, the caller may determine
- * whether or not a lock was successfully freed by testing the return
- * value for -ENOENT.
- */
- static int flock_lock_inode(struct inode *inode, struct file_lock *request)
- {
- struct file_lock *new_fl = NULL;
- struct file_lock *fl;
- struct file_lock_context *ctx;
- int error = 0;
- bool found = false;
- LIST_HEAD(dispose);
- ctx = locks_get_lock_context(inode, request->c.flc_type);
- if (!ctx) {
- if (request->c.flc_type != F_UNLCK)
- return -ENOMEM;
- return (request->c.flc_flags & FL_EXISTS) ? -ENOENT : 0;
- }
- if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) {
- new_fl = locks_alloc_lock();
- if (!new_fl)
- return -ENOMEM;
- }
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- if (request->c.flc_flags & FL_ACCESS)
- goto find_conflict;
- list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
- if (request->c.flc_file != fl->c.flc_file)
- continue;
- if (request->c.flc_type == fl->c.flc_type)
- goto out;
- found = true;
- locks_delete_lock_ctx(&fl->c, &dispose);
- break;
- }
- if (lock_is_unlock(request)) {
- if ((request->c.flc_flags & FL_EXISTS) && !found)
- error = -ENOENT;
- goto out;
- }
- find_conflict:
- list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) {
- if (!flock_locks_conflict(&request->c, &fl->c))
- continue;
- error = -EAGAIN;
- if (!(request->c.flc_flags & FL_SLEEP))
- goto out;
- error = FILE_LOCK_DEFERRED;
- locks_insert_block(&fl->c, &request->c, flock_locks_conflict);
- goto out;
- }
- if (request->c.flc_flags & FL_ACCESS)
- goto out;
- locks_copy_lock(new_fl, request);
- locks_move_blocks(new_fl, request);
- locks_insert_lock_ctx(&new_fl->c, &ctx->flc_flock);
- new_fl = NULL;
- error = 0;
- out:
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- if (new_fl)
- locks_free_lock(new_fl);
- locks_dispose_list(&dispose);
- trace_flock_lock_inode(inode, request, error);
- return error;
- }
- static int posix_lock_inode(struct inode *inode, struct file_lock *request,
- struct file_lock *conflock)
- {
- struct file_lock *fl, *tmp;
- struct file_lock *new_fl = NULL;
- struct file_lock *new_fl2 = NULL;
- struct file_lock *left = NULL;
- struct file_lock *right = NULL;
- struct file_lock_context *ctx;
- int error;
- bool added = false;
- LIST_HEAD(dispose);
- void *owner;
- void (*func)(void);
- ctx = locks_get_lock_context(inode, request->c.flc_type);
- if (!ctx)
- return lock_is_unlock(request) ? 0 : -ENOMEM;
- /*
- * We may need two file_lock structures for this operation,
- * so we get them in advance to avoid races.
- *
- * In some cases we can be sure, that no new locks will be needed
- */
- if (!(request->c.flc_flags & FL_ACCESS) &&
- (request->c.flc_type != F_UNLCK ||
- request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
- new_fl = locks_alloc_lock();
- new_fl2 = locks_alloc_lock();
- }
- retry:
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- /*
- * New lock request. Walk all POSIX locks and look for conflicts. If
- * there are any, either return error or put the request on the
- * blocker's list of waiters and the global blocked_hash.
- */
- if (request->c.flc_type != F_UNLCK) {
- list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
- if (!posix_locks_conflict(&request->c, &fl->c))
- continue;
- if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
- && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
- owner = fl->fl_lmops->lm_mod_owner;
- func = fl->fl_lmops->lm_expire_lock;
- __module_get(owner);
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- (*func)();
- module_put(owner);
- goto retry;
- }
- if (conflock)
- locks_copy_conflock(conflock, fl);
- error = -EAGAIN;
- if (!(request->c.flc_flags & FL_SLEEP))
- goto out;
- /*
- * Deadlock detection and insertion into the blocked
- * locks list must be done while holding the same lock!
- */
- error = -EDEADLK;
- spin_lock(&blocked_lock_lock);
- /*
- * Ensure that we don't find any locks blocked on this
- * request during deadlock detection.
- */
- __locks_wake_up_blocks(&request->c);
- if (likely(!posix_locks_deadlock(request, fl))) {
- error = FILE_LOCK_DEFERRED;
- __locks_insert_block(&fl->c, &request->c,
- posix_locks_conflict);
- }
- spin_unlock(&blocked_lock_lock);
- goto out;
- }
- }
- /* If we're just looking for a conflict, we're done. */
- error = 0;
- if (request->c.flc_flags & FL_ACCESS)
- goto out;
- /* Find the first old lock with the same owner as the new lock */
- list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) {
- if (posix_same_owner(&request->c, &fl->c))
- break;
- }
- /* Process locks with this owner. */
- list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) {
- if (!posix_same_owner(&request->c, &fl->c))
- break;
- /* Detect adjacent or overlapping regions (if same lock type) */
- if (request->c.flc_type == fl->c.flc_type) {
- /* In all comparisons of start vs end, use
- * "start - 1" rather than "end + 1". If end
- * is OFFSET_MAX, end + 1 will become negative.
- */
- if (fl->fl_end < request->fl_start - 1)
- continue;
- /* If the next lock in the list has entirely bigger
- * addresses than the new one, insert the lock here.
- */
- if (fl->fl_start - 1 > request->fl_end)
- break;
- /* If we come here, the new and old lock are of the
- * same type and adjacent or overlapping. Make one
- * lock yielding from the lower start address of both
- * locks to the higher end address.
- */
- if (fl->fl_start > request->fl_start)
- fl->fl_start = request->fl_start;
- else
- request->fl_start = fl->fl_start;
- if (fl->fl_end < request->fl_end)
- fl->fl_end = request->fl_end;
- else
- request->fl_end = fl->fl_end;
- if (added) {
- locks_delete_lock_ctx(&fl->c, &dispose);
- continue;
- }
- request = fl;
- added = true;
- } else {
- /* Processing for different lock types is a bit
- * more complex.
- */
- if (fl->fl_end < request->fl_start)
- continue;
- if (fl->fl_start > request->fl_end)
- break;
- if (lock_is_unlock(request))
- added = true;
- if (fl->fl_start < request->fl_start)
- left = fl;
- /* If the next lock in the list has a higher end
- * address than the new one, insert the new one here.
- */
- if (fl->fl_end > request->fl_end) {
- right = fl;
- break;
- }
- if (fl->fl_start >= request->fl_start) {
- /* The new lock completely replaces an old
- * one (This may happen several times).
- */
- if (added) {
- locks_delete_lock_ctx(&fl->c, &dispose);
- continue;
- }
- /*
- * Replace the old lock with new_fl, and
- * remove the old one. It's safe to do the
- * insert here since we know that we won't be
- * using new_fl later, and that the lock is
- * just replacing an existing lock.
- */
- error = -ENOLCK;
- if (!new_fl)
- goto out;
- locks_copy_lock(new_fl, request);
- locks_move_blocks(new_fl, request);
- request = new_fl;
- new_fl = NULL;
- locks_insert_lock_ctx(&request->c,
- &fl->c.flc_list);
- locks_delete_lock_ctx(&fl->c, &dispose);
- added = true;
- }
- }
- }
- /*
- * The above code only modifies existing locks in case of merging or
- * replacing. If new lock(s) need to be inserted all modifications are
- * done below this, so it's safe yet to bail out.
- */
- error = -ENOLCK; /* "no luck" */
- if (right && left == right && !new_fl2)
- goto out;
- error = 0;
- if (!added) {
- if (lock_is_unlock(request)) {
- if (request->c.flc_flags & FL_EXISTS)
- error = -ENOENT;
- goto out;
- }
- if (!new_fl) {
- error = -ENOLCK;
- goto out;
- }
- locks_copy_lock(new_fl, request);
- locks_move_blocks(new_fl, request);
- locks_insert_lock_ctx(&new_fl->c, &fl->c.flc_list);
- fl = new_fl;
- new_fl = NULL;
- }
- if (right) {
- if (left == right) {
- /* The new lock breaks the old one in two pieces,
- * so we have to use the second new lock.
- */
- left = new_fl2;
- new_fl2 = NULL;
- locks_copy_lock(left, right);
- locks_insert_lock_ctx(&left->c, &fl->c.flc_list);
- }
- right->fl_start = request->fl_end + 1;
- locks_wake_up_blocks(&right->c);
- }
- if (left) {
- left->fl_end = request->fl_start - 1;
- locks_wake_up_blocks(&left->c);
- }
- out:
- trace_posix_lock_inode(inode, request, error);
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- /*
- * Free any unused locks.
- */
- if (new_fl)
- locks_free_lock(new_fl);
- if (new_fl2)
- locks_free_lock(new_fl2);
- locks_dispose_list(&dispose);
- return error;
- }
- /**
- * posix_lock_file - Apply a POSIX-style lock to a file
- * @filp: The file to apply the lock to
- * @fl: The lock to be applied
- * @conflock: Place to return a copy of the conflicting lock, if found.
- *
- * Add a POSIX style lock to a file.
- * We merge adjacent & overlapping locks whenever possible.
- * POSIX locks are sorted by owner task, then by starting address
- *
- * Note that if called with an FL_EXISTS argument, the caller may determine
- * whether or not a lock was successfully freed by testing the return
- * value for -ENOENT.
- */
- int posix_lock_file(struct file *filp, struct file_lock *fl,
- struct file_lock *conflock)
- {
- return posix_lock_inode(file_inode(filp), fl, conflock);
- }
- EXPORT_SYMBOL(posix_lock_file);
- /**
- * posix_lock_inode_wait - Apply a POSIX-style lock to a file
- * @inode: inode of file to which lock request should be applied
- * @fl: The lock to be applied
- *
- * Apply a POSIX style lock request to an inode.
- */
- static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
- {
- int error;
- might_sleep ();
- for (;;) {
- error = posix_lock_inode(inode, fl, NULL);
- if (error != FILE_LOCK_DEFERRED)
- break;
- error = wait_event_interruptible(fl->c.flc_wait,
- list_empty(&fl->c.flc_blocked_member));
- if (error)
- break;
- }
- locks_delete_block(fl);
- return error;
- }
- static void lease_clear_pending(struct file_lease *fl, int arg)
- {
- switch (arg) {
- case F_UNLCK:
- fl->c.flc_flags &= ~FL_UNLOCK_PENDING;
- fallthrough;
- case F_RDLCK:
- fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING;
- }
- }
- /* We already had a lease on this file; just change its type */
- int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose)
- {
- int error = assign_type(&fl->c, arg);
- if (error)
- return error;
- lease_clear_pending(fl, arg);
- locks_wake_up_blocks(&fl->c);
- if (arg == F_UNLCK) {
- struct file *filp = fl->c.flc_file;
- f_delown(filp);
- file_f_owner(filp)->signum = 0;
- fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync);
- if (fl->fl_fasync != NULL) {
- printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
- fl->fl_fasync = NULL;
- }
- locks_delete_lock_ctx(&fl->c, dispose);
- }
- return 0;
- }
- EXPORT_SYMBOL(lease_modify);
- static bool past_time(unsigned long then)
- {
- if (!then)
- /* 0 is a special value meaning "this never expires": */
- return false;
- return time_after(jiffies, then);
- }
- static void time_out_leases(struct inode *inode, struct list_head *dispose)
- {
- struct file_lock_context *ctx = inode->i_flctx;
- struct file_lease *fl, *tmp;
- lockdep_assert_held(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
- trace_time_out_leases(inode, fl);
- if (past_time(fl->fl_downgrade_time))
- lease_modify(fl, F_RDLCK, dispose);
- if (past_time(fl->fl_break_time))
- lease_modify(fl, F_UNLCK, dispose);
- }
- }
- static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc)
- {
- bool rc;
- struct file_lease *lease = file_lease(lc);
- struct file_lease *breaker = file_lease(bc);
- if (lease->fl_lmops->lm_breaker_owns_lease
- && lease->fl_lmops->lm_breaker_owns_lease(lease))
- return false;
- if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) {
- rc = false;
- goto trace;
- }
- if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) {
- rc = false;
- goto trace;
- }
- rc = locks_conflict(bc, lc);
- trace:
- trace_leases_conflict(rc, lease, breaker);
- return rc;
- }
- static bool
- any_leases_conflict(struct inode *inode, struct file_lease *breaker)
- {
- struct file_lock_context *ctx = inode->i_flctx;
- struct file_lock_core *flc;
- lockdep_assert_held(&ctx->flc_lock);
- list_for_each_entry(flc, &ctx->flc_lease, flc_list) {
- if (leases_conflict(flc, &breaker->c))
- return true;
- }
- return false;
- }
- /**
- * __break_lease - revoke all outstanding leases on file
- * @inode: the inode of the file to return
- * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
- * break all leases
- * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
- * only delegations
- *
- * break_lease (inlined for speed) has checked there already is at least
- * some kind of lock (maybe a lease) on this file. Leases are broken on
- * a call to open() or truncate(). This function can sleep unless you
- * specified %O_NONBLOCK to your open().
- */
- int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
- {
- int error = 0;
- struct file_lock_context *ctx;
- struct file_lease *new_fl, *fl, *tmp;
- unsigned long break_time;
- int want_write = (mode & O_ACCMODE) != O_RDONLY;
- LIST_HEAD(dispose);
- new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
- if (IS_ERR(new_fl))
- return PTR_ERR(new_fl);
- new_fl->c.flc_flags = type;
- /* typically we will check that ctx is non-NULL before calling */
- ctx = locks_inode_context(inode);
- if (!ctx) {
- WARN_ON_ONCE(1);
- goto free_lock;
- }
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- if (!any_leases_conflict(inode, new_fl))
- goto out;
- break_time = 0;
- if (lease_break_time > 0) {
- break_time = jiffies + lease_break_time * HZ;
- if (break_time == 0)
- break_time++; /* so that 0 means no break time */
- }
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) {
- if (!leases_conflict(&fl->c, &new_fl->c))
- continue;
- if (want_write) {
- if (fl->c.flc_flags & FL_UNLOCK_PENDING)
- continue;
- fl->c.flc_flags |= FL_UNLOCK_PENDING;
- fl->fl_break_time = break_time;
- } else {
- if (lease_breaking(fl))
- continue;
- fl->c.flc_flags |= FL_DOWNGRADE_PENDING;
- fl->fl_downgrade_time = break_time;
- }
- if (fl->fl_lmops->lm_break(fl))
- locks_delete_lock_ctx(&fl->c, &dispose);
- }
- if (list_empty(&ctx->flc_lease))
- goto out;
- if (mode & O_NONBLOCK) {
- trace_break_lease_noblock(inode, new_fl);
- error = -EWOULDBLOCK;
- goto out;
- }
- restart:
- fl = list_first_entry(&ctx->flc_lease, struct file_lease, c.flc_list);
- break_time = fl->fl_break_time;
- if (break_time != 0)
- break_time -= jiffies;
- if (break_time == 0)
- break_time++;
- locks_insert_block(&fl->c, &new_fl->c, leases_conflict);
- trace_break_lease_block(inode, new_fl);
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
- list_empty(&new_fl->c.flc_blocked_member),
- break_time);
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- trace_break_lease_unblock(inode, new_fl);
- __locks_delete_block(&new_fl->c);
- if (error >= 0) {
- /*
- * Wait for the next conflicting lease that has not been
- * broken yet
- */
- if (error == 0)
- time_out_leases(inode, &dispose);
- if (any_leases_conflict(inode, new_fl))
- goto restart;
- error = 0;
- }
- out:
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- free_lock:
- locks_free_lease(new_fl);
- return error;
- }
- EXPORT_SYMBOL(__break_lease);
- /**
- * lease_get_mtime - update modified time of an inode with exclusive lease
- * @inode: the inode
- * @time: pointer to a timespec which contains the last modified time
- *
- * This is to force NFS clients to flush their caches for files with
- * exclusive leases. The justification is that if someone has an
- * exclusive lease, then they could be modifying it.
- */
- void lease_get_mtime(struct inode *inode, struct timespec64 *time)
- {
- bool has_lease = false;
- struct file_lock_context *ctx;
- struct file_lock_core *flc;
- ctx = locks_inode_context(inode);
- if (ctx && !list_empty_careful(&ctx->flc_lease)) {
- spin_lock(&ctx->flc_lock);
- flc = list_first_entry_or_null(&ctx->flc_lease,
- struct file_lock_core, flc_list);
- if (flc && flc->flc_type == F_WRLCK)
- has_lease = true;
- spin_unlock(&ctx->flc_lock);
- }
- if (has_lease)
- *time = current_time(inode);
- }
- EXPORT_SYMBOL(lease_get_mtime);
- /**
- * fcntl_getlease - Enquire what lease is currently active
- * @filp: the file
- *
- * The value returned by this function will be one of
- * (if no lease break is pending):
- *
- * %F_RDLCK to indicate a shared lease is held.
- *
- * %F_WRLCK to indicate an exclusive lease is held.
- *
- * %F_UNLCK to indicate no lease is held.
- *
- * (if a lease break is pending):
- *
- * %F_RDLCK to indicate an exclusive lease needs to be
- * changed to a shared lease (or removed).
- *
- * %F_UNLCK to indicate the lease needs to be removed.
- *
- * XXX: sfr & willy disagree over whether F_INPROGRESS
- * should be returned to userspace.
- */
- int fcntl_getlease(struct file *filp)
- {
- struct file_lease *fl;
- struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx;
- int type = F_UNLCK;
- LIST_HEAD(dispose);
- ctx = locks_inode_context(inode);
- if (ctx && !list_empty_careful(&ctx->flc_lease)) {
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
- if (fl->c.flc_file != filp)
- continue;
- type = target_leasetype(fl);
- break;
- }
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- }
- return type;
- }
- /**
- * check_conflicting_open - see if the given file points to an inode that has
- * an existing open that would conflict with the
- * desired lease.
- * @filp: file to check
- * @arg: type of lease that we're trying to acquire
- * @flags: current lock flags
- *
- * Check to see if there's an existing open fd on this file that would
- * conflict with the lease we're trying to set.
- */
- static int
- check_conflicting_open(struct file *filp, const int arg, int flags)
- {
- struct inode *inode = file_inode(filp);
- int self_wcount = 0, self_rcount = 0;
- if (flags & FL_LAYOUT)
- return 0;
- if (flags & FL_DELEG)
- /* We leave these checks to the caller */
- return 0;
- if (arg == F_RDLCK)
- return inode_is_open_for_write(inode) ? -EAGAIN : 0;
- else if (arg != F_WRLCK)
- return 0;
- /*
- * Make sure that only read/write count is from lease requestor.
- * Note that this will result in denying write leases when i_writecount
- * is negative, which is what we want. (We shouldn't grant write leases
- * on files open for execution.)
- */
- if (filp->f_mode & FMODE_WRITE)
- self_wcount = 1;
- else if (filp->f_mode & FMODE_READ)
- self_rcount = 1;
- if (atomic_read(&inode->i_writecount) != self_wcount ||
- atomic_read(&inode->i_readcount) != self_rcount)
- return -EAGAIN;
- return 0;
- }
- static int
- generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
- {
- struct file_lease *fl, *my_fl = NULL, *lease;
- struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx;
- bool is_deleg = (*flp)->c.flc_flags & FL_DELEG;
- int error;
- LIST_HEAD(dispose);
- lease = *flp;
- trace_generic_add_lease(inode, lease);
- error = file_f_owner_allocate(filp);
- if (error)
- return error;
- /* Note that arg is never F_UNLCK here */
- ctx = locks_get_lock_context(inode, arg);
- if (!ctx)
- return -ENOMEM;
- /*
- * In the delegation case we need mutual exclusion with
- * a number of operations that take the i_mutex. We trylock
- * because delegations are an optional optimization, and if
- * there's some chance of a conflict--we'd rather not
- * bother, maybe that's a sign this just isn't a good file to
- * hand out a delegation on.
- */
- if (is_deleg && !inode_trylock(inode))
- return -EAGAIN;
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- time_out_leases(inode, &dispose);
- error = check_conflicting_open(filp, arg, lease->c.flc_flags);
- if (error)
- goto out;
- /*
- * At this point, we know that if there is an exclusive
- * lease on this file, then we hold it on this filp
- * (otherwise our open of this file would have blocked).
- * And if we are trying to acquire an exclusive lease,
- * then the file is not open by anyone (including us)
- * except for this filp.
- */
- error = -EAGAIN;
- list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
- if (fl->c.flc_file == filp &&
- fl->c.flc_owner == lease->c.flc_owner) {
- my_fl = fl;
- continue;
- }
- /*
- * No exclusive leases if someone else has a lease on
- * this file:
- */
- if (arg == F_WRLCK)
- goto out;
- /*
- * Modifying our existing lease is OK, but no getting a
- * new lease if someone else is opening for write:
- */
- if (fl->c.flc_flags & FL_UNLOCK_PENDING)
- goto out;
- }
- if (my_fl != NULL) {
- lease = my_fl;
- error = lease->fl_lmops->lm_change(lease, arg, &dispose);
- if (error)
- goto out;
- goto out_setup;
- }
- error = -EINVAL;
- if (!leases_enable)
- goto out;
- locks_insert_lock_ctx(&lease->c, &ctx->flc_lease);
- /*
- * The check in break_lease() is lockless. It's possible for another
- * open to race in after we did the earlier check for a conflicting
- * open but before the lease was inserted. Check again for a
- * conflicting open and cancel the lease if there is one.
- *
- * We also add a barrier here to ensure that the insertion of the lock
- * precedes these checks.
- */
- smp_mb();
- error = check_conflicting_open(filp, arg, lease->c.flc_flags);
- if (error) {
- locks_unlink_lock_ctx(&lease->c);
- goto out;
- }
- out_setup:
- if (lease->fl_lmops->lm_setup)
- lease->fl_lmops->lm_setup(lease, priv);
- out:
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- if (is_deleg)
- inode_unlock(inode);
- if (!error && !my_fl)
- *flp = NULL;
- return error;
- }
- static int generic_delete_lease(struct file *filp, void *owner)
- {
- int error = -EAGAIN;
- struct file_lease *fl, *victim = NULL;
- struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx;
- LIST_HEAD(dispose);
- ctx = locks_inode_context(inode);
- if (!ctx) {
- trace_generic_delete_lease(inode, NULL);
- return error;
- }
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) {
- if (fl->c.flc_file == filp &&
- fl->c.flc_owner == owner) {
- victim = fl;
- break;
- }
- }
- trace_generic_delete_lease(inode, victim);
- if (victim)
- error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- return error;
- }
- /**
- * generic_setlease - sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @flp: input - file_lock to use, output - file_lock inserted
- * @priv: private data for lm_setup (may be NULL if lm_setup
- * doesn't require it)
- *
- * The (input) flp->fl_lmops->lm_break function is required
- * by break_lease().
- */
- int generic_setlease(struct file *filp, int arg, struct file_lease **flp,
- void **priv)
- {
- switch (arg) {
- case F_UNLCK:
- return generic_delete_lease(filp, *priv);
- case F_RDLCK:
- case F_WRLCK:
- if (!(*flp)->fl_lmops->lm_break) {
- WARN_ON_ONCE(1);
- return -ENOLCK;
- }
- return generic_add_lease(filp, arg, flp, priv);
- default:
- return -EINVAL;
- }
- }
- EXPORT_SYMBOL(generic_setlease);
- /*
- * Kernel subsystems can register to be notified on any attempt to set
- * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
- * to close files that it may have cached when there is an attempt to set a
- * conflicting lease.
- */
- static struct srcu_notifier_head lease_notifier_chain;
- static inline void
- lease_notifier_chain_init(void)
- {
- srcu_init_notifier_head(&lease_notifier_chain);
- }
- static inline void
- setlease_notifier(int arg, struct file_lease *lease)
- {
- if (arg != F_UNLCK)
- srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
- }
- int lease_register_notifier(struct notifier_block *nb)
- {
- return srcu_notifier_chain_register(&lease_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(lease_register_notifier);
- void lease_unregister_notifier(struct notifier_block *nb)
- {
- srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(lease_unregister_notifier);
- int
- kernel_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
- {
- if (lease)
- setlease_notifier(arg, *lease);
- if (filp->f_op->setlease)
- return filp->f_op->setlease(filp, arg, lease, priv);
- else
- return generic_setlease(filp, arg, lease, priv);
- }
- EXPORT_SYMBOL_GPL(kernel_setlease);
- /**
- * vfs_setlease - sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @lease: file_lock to use when adding a lease
- * @priv: private info for lm_setup when adding a lease (may be
- * NULL if lm_setup doesn't require it)
- *
- * Call this to establish a lease on the file. The "lease" argument is not
- * used for F_UNLCK requests and may be NULL. For commands that set or alter
- * an existing lease, the ``(*lease)->fl_lmops->lm_break`` operation must be
- * set; if not, this function will return -ENOLCK (and generate a scary-looking
- * stack trace).
- *
- * The "priv" pointer is passed directly to the lm_setup function as-is. It
- * may be NULL if the lm_setup operation doesn't require it.
- */
- int
- vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv)
- {
- struct inode *inode = file_inode(filp);
- vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(filp), inode);
- int error;
- if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
- return -EACCES;
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
- error = security_file_lock(filp, arg);
- if (error)
- return error;
- return kernel_setlease(filp, arg, lease, priv);
- }
- EXPORT_SYMBOL_GPL(vfs_setlease);
- static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
- {
- struct file_lease *fl;
- struct fasync_struct *new;
- int error;
- fl = lease_alloc(filp, arg);
- if (IS_ERR(fl))
- return PTR_ERR(fl);
- new = fasync_alloc();
- if (!new) {
- locks_free_lease(fl);
- return -ENOMEM;
- }
- new->fa_fd = fd;
- error = vfs_setlease(filp, arg, &fl, (void **)&new);
- if (fl)
- locks_free_lease(fl);
- if (new)
- fasync_free(new);
- return error;
- }
- /**
- * fcntl_setlease - sets a lease on an open file
- * @fd: open file descriptor
- * @filp: file pointer
- * @arg: type of lease to obtain
- *
- * Call this fcntl to establish a lease on the file.
- * Note that you also need to call %F_SETSIG to
- * receive a signal when the lease is broken.
- */
- int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
- {
- if (arg == F_UNLCK)
- return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
- return do_fcntl_add_lease(fd, filp, arg);
- }
- /**
- * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
- * @inode: inode of the file to apply to
- * @fl: The lock to be applied
- *
- * Apply a FLOCK style lock request to an inode.
- */
- static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
- {
- int error;
- might_sleep();
- for (;;) {
- error = flock_lock_inode(inode, fl);
- if (error != FILE_LOCK_DEFERRED)
- break;
- error = wait_event_interruptible(fl->c.flc_wait,
- list_empty(&fl->c.flc_blocked_member));
- if (error)
- break;
- }
- locks_delete_block(fl);
- return error;
- }
- /**
- * locks_lock_inode_wait - Apply a lock to an inode
- * @inode: inode of the file to apply to
- * @fl: The lock to be applied
- *
- * Apply a POSIX or FLOCK style lock request to an inode.
- */
- int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
- {
- int res = 0;
- switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) {
- case FL_POSIX:
- res = posix_lock_inode_wait(inode, fl);
- break;
- case FL_FLOCK:
- res = flock_lock_inode_wait(inode, fl);
- break;
- default:
- BUG();
- }
- return res;
- }
- EXPORT_SYMBOL(locks_lock_inode_wait);
- /**
- * sys_flock: - flock() system call.
- * @fd: the file descriptor to lock.
- * @cmd: the type of lock to apply.
- *
- * Apply a %FL_FLOCK style lock to an open file descriptor.
- * The @cmd can be one of:
- *
- * - %LOCK_SH -- a shared lock.
- * - %LOCK_EX -- an exclusive lock.
- * - %LOCK_UN -- remove an existing lock.
- * - %LOCK_MAND -- a 'mandatory' flock. (DEPRECATED)
- *
- * %LOCK_MAND support has been removed from the kernel.
- */
- SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
- {
- int can_sleep, error, type;
- struct file_lock fl;
- struct fd f;
- /*
- * LOCK_MAND locks were broken for a long time in that they never
- * conflicted with one another and didn't prevent any sort of open,
- * read or write activity.
- *
- * Just ignore these requests now, to preserve legacy behavior, but
- * throw a warning to let people know that they don't actually work.
- */
- if (cmd & LOCK_MAND) {
- pr_warn_once("%s(%d): Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n", current->comm, current->pid);
- return 0;
- }
- type = flock_translate_cmd(cmd & ~LOCK_NB);
- if (type < 0)
- return type;
- error = -EBADF;
- f = fdget(fd);
- if (!fd_file(f))
- return error;
- if (type != F_UNLCK && !(fd_file(f)->f_mode & (FMODE_READ | FMODE_WRITE)))
- goto out_putf;
- flock_make_lock(fd_file(f), &fl, type);
- error = security_file_lock(fd_file(f), fl.c.flc_type);
- if (error)
- goto out_putf;
- can_sleep = !(cmd & LOCK_NB);
- if (can_sleep)
- fl.c.flc_flags |= FL_SLEEP;
- if (fd_file(f)->f_op->flock)
- error = fd_file(f)->f_op->flock(fd_file(f),
- (can_sleep) ? F_SETLKW : F_SETLK,
- &fl);
- else
- error = locks_lock_file_wait(fd_file(f), &fl);
- locks_release_private(&fl);
- out_putf:
- fdput(f);
- return error;
- }
- /**
- * vfs_test_lock - test file byte range lock
- * @filp: The file to test lock for
- * @fl: The lock to test; also used to hold result
- *
- * Returns -ERRNO on failure. Indicates presence of conflicting lock by
- * setting conf->fl_type to something other than F_UNLCK.
- */
- int vfs_test_lock(struct file *filp, struct file_lock *fl)
- {
- WARN_ON_ONCE(filp != fl->c.flc_file);
- if (filp->f_op->lock)
- return filp->f_op->lock(filp, F_GETLK, fl);
- posix_test_lock(filp, fl);
- return 0;
- }
- EXPORT_SYMBOL_GPL(vfs_test_lock);
- /**
- * locks_translate_pid - translate a file_lock's fl_pid number into a namespace
- * @fl: The file_lock who's fl_pid should be translated
- * @ns: The namespace into which the pid should be translated
- *
- * Used to translate a fl_pid into a namespace virtual pid number
- */
- static pid_t locks_translate_pid(struct file_lock_core *fl, struct pid_namespace *ns)
- {
- pid_t vnr;
- struct pid *pid;
- if (fl->flc_flags & FL_OFDLCK)
- return -1;
- /* Remote locks report a negative pid value */
- if (fl->flc_pid <= 0)
- return fl->flc_pid;
- /*
- * If the flock owner process is dead and its pid has been already
- * freed, the translation below won't work, but we still want to show
- * flock owner pid number in init pidns.
- */
- if (ns == &init_pid_ns)
- return (pid_t) fl->flc_pid;
- rcu_read_lock();
- pid = find_pid_ns(fl->flc_pid, &init_pid_ns);
- vnr = pid_nr_ns(pid, ns);
- rcu_read_unlock();
- return vnr;
- }
- static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
- {
- flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
- #if BITS_PER_LONG == 32
- /*
- * Make sure we can represent the posix lock via
- * legacy 32bit flock.
- */
- if (fl->fl_start > OFFT_OFFSET_MAX)
- return -EOVERFLOW;
- if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
- return -EOVERFLOW;
- #endif
- flock->l_start = fl->fl_start;
- flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
- fl->fl_end - fl->fl_start + 1;
- flock->l_whence = 0;
- flock->l_type = fl->c.flc_type;
- return 0;
- }
- #if BITS_PER_LONG == 32
- static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
- {
- flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
- flock->l_start = fl->fl_start;
- flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
- fl->fl_end - fl->fl_start + 1;
- flock->l_whence = 0;
- flock->l_type = fl->c.flc_type;
- }
- #endif
- /* Report the first existing lock that would conflict with l.
- * This implements the F_GETLK command of fcntl().
- */
- int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
- {
- struct file_lock *fl;
- int error;
- fl = locks_alloc_lock();
- if (fl == NULL)
- return -ENOMEM;
- error = -EINVAL;
- if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
- && flock->l_type != F_WRLCK)
- goto out;
- error = flock_to_posix_lock(filp, fl, flock);
- if (error)
- goto out;
- if (cmd == F_OFD_GETLK) {
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- fl->c.flc_flags |= FL_OFDLCK;
- fl->c.flc_owner = filp;
- }
- error = vfs_test_lock(filp, fl);
- if (error)
- goto out;
- flock->l_type = fl->c.flc_type;
- if (fl->c.flc_type != F_UNLCK) {
- error = posix_lock_to_flock(flock, fl);
- if (error)
- goto out;
- }
- out:
- locks_free_lock(fl);
- return error;
- }
- /**
- * vfs_lock_file - file byte range lock
- * @filp: The file to apply the lock to
- * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
- * @fl: The lock to be applied
- * @conf: Place to return a copy of the conflicting lock, if found.
- *
- * A caller that doesn't care about the conflicting lock may pass NULL
- * as the final argument.
- *
- * If the filesystem defines a private ->lock() method, then @conf will
- * be left unchanged; so a caller that cares should initialize it to
- * some acceptable default.
- *
- * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
- * locks, the ->lock() interface may return asynchronously, before the lock has
- * been granted or denied by the underlying filesystem, if (and only if)
- * lm_grant is set. Additionally EXPORT_OP_ASYNC_LOCK in export_operations
- * flags need to be set.
- *
- * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
- * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
- * blocking lock. When ->lock() does return asynchronously, it must return
- * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
- * If the request is for non-blocking lock the file system should return
- * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
- * with the result. If the request timed out the callback routine will return a
- * nonzero return code and the file system should release the lock. The file
- * system is also responsible to keep a corresponding posix lock when it
- * grants a lock so the VFS can find out which locks are locally held and do
- * the correct lock cleanup when required.
- * The underlying filesystem must not drop the kernel lock or call
- * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
- * return code.
- */
- int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
- {
- WARN_ON_ONCE(filp != fl->c.flc_file);
- if (filp->f_op->lock)
- return filp->f_op->lock(filp, cmd, fl);
- else
- return posix_lock_file(filp, fl, conf);
- }
- EXPORT_SYMBOL_GPL(vfs_lock_file);
- static int do_lock_file_wait(struct file *filp, unsigned int cmd,
- struct file_lock *fl)
- {
- int error;
- error = security_file_lock(filp, fl->c.flc_type);
- if (error)
- return error;
- for (;;) {
- error = vfs_lock_file(filp, cmd, fl, NULL);
- if (error != FILE_LOCK_DEFERRED)
- break;
- error = wait_event_interruptible(fl->c.flc_wait,
- list_empty(&fl->c.flc_blocked_member));
- if (error)
- break;
- }
- locks_delete_block(fl);
- return error;
- }
- /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
- static int
- check_fmode_for_setlk(struct file_lock *fl)
- {
- switch (fl->c.flc_type) {
- case F_RDLCK:
- if (!(fl->c.flc_file->f_mode & FMODE_READ))
- return -EBADF;
- break;
- case F_WRLCK:
- if (!(fl->c.flc_file->f_mode & FMODE_WRITE))
- return -EBADF;
- }
- return 0;
- }
- /* Apply the lock described by l to an open file descriptor.
- * This implements both the F_SETLK and F_SETLKW commands of fcntl().
- */
- int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
- struct flock *flock)
- {
- struct file_lock *file_lock = locks_alloc_lock();
- struct inode *inode = file_inode(filp);
- struct file *f;
- int error;
- if (file_lock == NULL)
- return -ENOLCK;
- error = flock_to_posix_lock(filp, file_lock, flock);
- if (error)
- goto out;
- error = check_fmode_for_setlk(file_lock);
- if (error)
- goto out;
- /*
- * If the cmd is requesting file-private locks, then set the
- * FL_OFDLCK flag and override the owner.
- */
- switch (cmd) {
- case F_OFD_SETLK:
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- cmd = F_SETLK;
- file_lock->c.flc_flags |= FL_OFDLCK;
- file_lock->c.flc_owner = filp;
- break;
- case F_OFD_SETLKW:
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- cmd = F_SETLKW;
- file_lock->c.flc_flags |= FL_OFDLCK;
- file_lock->c.flc_owner = filp;
- fallthrough;
- case F_SETLKW:
- file_lock->c.flc_flags |= FL_SLEEP;
- }
- error = do_lock_file_wait(filp, cmd, file_lock);
- /*
- * Detect close/fcntl races and recover by zapping all POSIX locks
- * associated with this file and our files_struct, just like on
- * filp_flush(). There is no need to do that when we're
- * unlocking though, or for OFD locks.
- */
- if (!error && file_lock->c.flc_type != F_UNLCK &&
- !(file_lock->c.flc_flags & FL_OFDLCK)) {
- struct files_struct *files = current->files;
- /*
- * We need that spin_lock here - it prevents reordering between
- * update of i_flctx->flc_posix and check for it done in
- * close(). rcu_read_lock() wouldn't do.
- */
- spin_lock(&files->file_lock);
- f = files_lookup_fd_locked(files, fd);
- spin_unlock(&files->file_lock);
- if (f != filp) {
- locks_remove_posix(filp, files);
- error = -EBADF;
- }
- }
- out:
- trace_fcntl_setlk(inode, file_lock, error);
- locks_free_lock(file_lock);
- return error;
- }
- #if BITS_PER_LONG == 32
- /* Report the first existing lock that would conflict with l.
- * This implements the F_GETLK command of fcntl().
- */
- int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
- {
- struct file_lock *fl;
- int error;
- fl = locks_alloc_lock();
- if (fl == NULL)
- return -ENOMEM;
- error = -EINVAL;
- if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
- && flock->l_type != F_WRLCK)
- goto out;
- error = flock64_to_posix_lock(filp, fl, flock);
- if (error)
- goto out;
- if (cmd == F_OFD_GETLK) {
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- fl->c.flc_flags |= FL_OFDLCK;
- fl->c.flc_owner = filp;
- }
- error = vfs_test_lock(filp, fl);
- if (error)
- goto out;
- flock->l_type = fl->c.flc_type;
- if (fl->c.flc_type != F_UNLCK)
- posix_lock_to_flock64(flock, fl);
- out:
- locks_free_lock(fl);
- return error;
- }
- /* Apply the lock described by l to an open file descriptor.
- * This implements both the F_SETLK and F_SETLKW commands of fcntl().
- */
- int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
- struct flock64 *flock)
- {
- struct file_lock *file_lock = locks_alloc_lock();
- struct file *f;
- int error;
- if (file_lock == NULL)
- return -ENOLCK;
- error = flock64_to_posix_lock(filp, file_lock, flock);
- if (error)
- goto out;
- error = check_fmode_for_setlk(file_lock);
- if (error)
- goto out;
- /*
- * If the cmd is requesting file-private locks, then set the
- * FL_OFDLCK flag and override the owner.
- */
- switch (cmd) {
- case F_OFD_SETLK:
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- cmd = F_SETLK64;
- file_lock->c.flc_flags |= FL_OFDLCK;
- file_lock->c.flc_owner = filp;
- break;
- case F_OFD_SETLKW:
- error = -EINVAL;
- if (flock->l_pid != 0)
- goto out;
- cmd = F_SETLKW64;
- file_lock->c.flc_flags |= FL_OFDLCK;
- file_lock->c.flc_owner = filp;
- fallthrough;
- case F_SETLKW64:
- file_lock->c.flc_flags |= FL_SLEEP;
- }
- error = do_lock_file_wait(filp, cmd, file_lock);
- /*
- * Detect close/fcntl races and recover by zapping all POSIX locks
- * associated with this file and our files_struct, just like on
- * filp_flush(). There is no need to do that when we're
- * unlocking though, or for OFD locks.
- */
- if (!error && file_lock->c.flc_type != F_UNLCK &&
- !(file_lock->c.flc_flags & FL_OFDLCK)) {
- struct files_struct *files = current->files;
- /*
- * We need that spin_lock here - it prevents reordering between
- * update of i_flctx->flc_posix and check for it done in
- * close(). rcu_read_lock() wouldn't do.
- */
- spin_lock(&files->file_lock);
- f = files_lookup_fd_locked(files, fd);
- spin_unlock(&files->file_lock);
- if (f != filp) {
- locks_remove_posix(filp, files);
- error = -EBADF;
- }
- }
- out:
- locks_free_lock(file_lock);
- return error;
- }
- #endif /* BITS_PER_LONG == 32 */
- /*
- * This function is called when the file is being removed
- * from the task's fd array. POSIX locks belonging to this task
- * are deleted at this time.
- */
- void locks_remove_posix(struct file *filp, fl_owner_t owner)
- {
- int error;
- struct inode *inode = file_inode(filp);
- struct file_lock lock;
- struct file_lock_context *ctx;
- /*
- * If there are no locks held on this file, we don't need to call
- * posix_lock_file(). Another process could be setting a lock on this
- * file at the same time, but we wouldn't remove that lock anyway.
- */
- ctx = locks_inode_context(inode);
- if (!ctx || list_empty(&ctx->flc_posix))
- return;
- locks_init_lock(&lock);
- lock.c.flc_type = F_UNLCK;
- lock.c.flc_flags = FL_POSIX | FL_CLOSE;
- lock.fl_start = 0;
- lock.fl_end = OFFSET_MAX;
- lock.c.flc_owner = owner;
- lock.c.flc_pid = current->tgid;
- lock.c.flc_file = filp;
- lock.fl_ops = NULL;
- lock.fl_lmops = NULL;
- error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
- if (lock.fl_ops && lock.fl_ops->fl_release_private)
- lock.fl_ops->fl_release_private(&lock);
- trace_locks_remove_posix(inode, &lock, error);
- }
- EXPORT_SYMBOL(locks_remove_posix);
- /* The i_flctx must be valid when calling into here */
- static void
- locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
- {
- struct file_lock fl;
- struct inode *inode = file_inode(filp);
- if (list_empty(&flctx->flc_flock))
- return;
- flock_make_lock(filp, &fl, F_UNLCK);
- fl.c.flc_flags |= FL_CLOSE;
- if (filp->f_op->flock)
- filp->f_op->flock(filp, F_SETLKW, &fl);
- else
- flock_lock_inode(inode, &fl);
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
- fl.fl_ops->fl_release_private(&fl);
- }
- /* The i_flctx must be valid when calling into here */
- static void
- locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
- {
- struct file_lease *fl, *tmp;
- LIST_HEAD(dispose);
- if (list_empty(&ctx->flc_lease))
- return;
- percpu_down_read(&file_rwsem);
- spin_lock(&ctx->flc_lock);
- list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list)
- if (filp == fl->c.flc_file)
- lease_modify(fl, F_UNLCK, &dispose);
- spin_unlock(&ctx->flc_lock);
- percpu_up_read(&file_rwsem);
- locks_dispose_list(&dispose);
- }
- /*
- * This function is called on the last close of an open file.
- */
- void locks_remove_file(struct file *filp)
- {
- struct file_lock_context *ctx;
- ctx = locks_inode_context(file_inode(filp));
- if (!ctx)
- return;
- /* remove any OFD locks */
- locks_remove_posix(filp, filp);
- /* remove flock locks */
- locks_remove_flock(filp, ctx);
- /* remove any leases */
- locks_remove_lease(filp, ctx);
- spin_lock(&ctx->flc_lock);
- locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
- locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
- locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
- spin_unlock(&ctx->flc_lock);
- }
- /**
- * vfs_cancel_lock - file byte range unblock lock
- * @filp: The file to apply the unblock to
- * @fl: The lock to be unblocked
- *
- * Used by lock managers to cancel blocked requests
- */
- int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
- {
- WARN_ON_ONCE(filp != fl->c.flc_file);
- if (filp->f_op->lock)
- return filp->f_op->lock(filp, F_CANCELLK, fl);
- return 0;
- }
- EXPORT_SYMBOL_GPL(vfs_cancel_lock);
- /**
- * vfs_inode_has_locks - are any file locks held on @inode?
- * @inode: inode to check for locks
- *
- * Return true if there are any FL_POSIX or FL_FLOCK locks currently
- * set on @inode.
- */
- bool vfs_inode_has_locks(struct inode *inode)
- {
- struct file_lock_context *ctx;
- bool ret;
- ctx = locks_inode_context(inode);
- if (!ctx)
- return false;
- spin_lock(&ctx->flc_lock);
- ret = !list_empty(&ctx->flc_posix) || !list_empty(&ctx->flc_flock);
- spin_unlock(&ctx->flc_lock);
- return ret;
- }
- EXPORT_SYMBOL_GPL(vfs_inode_has_locks);
- #ifdef CONFIG_PROC_FS
- #include <linux/proc_fs.h>
- #include <linux/seq_file.h>
- struct locks_iterator {
- int li_cpu;
- loff_t li_pos;
- };
- static void lock_get_status(struct seq_file *f, struct file_lock_core *flc,
- loff_t id, char *pfx, int repeat)
- {
- struct inode *inode = NULL;
- unsigned int pid;
- struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
- int type = flc->flc_type;
- struct file_lock *fl = file_lock(flc);
- pid = locks_translate_pid(flc, proc_pidns);
- /*
- * If lock owner is dead (and pid is freed) or not visible in current
- * pidns, zero is shown as a pid value. Check lock info from
- * init_pid_ns to get saved lock pid value.
- */
- if (flc->flc_file != NULL)
- inode = file_inode(flc->flc_file);
- seq_printf(f, "%lld: ", id);
- if (repeat)
- seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
- if (flc->flc_flags & FL_POSIX) {
- if (flc->flc_flags & FL_ACCESS)
- seq_puts(f, "ACCESS");
- else if (flc->flc_flags & FL_OFDLCK)
- seq_puts(f, "OFDLCK");
- else
- seq_puts(f, "POSIX ");
- seq_printf(f, " %s ",
- (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
- } else if (flc->flc_flags & FL_FLOCK) {
- seq_puts(f, "FLOCK ADVISORY ");
- } else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) {
- struct file_lease *lease = file_lease(flc);
- type = target_leasetype(lease);
- if (flc->flc_flags & FL_DELEG)
- seq_puts(f, "DELEG ");
- else
- seq_puts(f, "LEASE ");
- if (lease_breaking(lease))
- seq_puts(f, "BREAKING ");
- else if (flc->flc_file)
- seq_puts(f, "ACTIVE ");
- else
- seq_puts(f, "BREAKER ");
- } else {
- seq_puts(f, "UNKNOWN UNKNOWN ");
- }
- seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
- (type == F_RDLCK) ? "READ" : "UNLCK");
- if (inode) {
- /* userspace relies on this representation of dev_t */
- seq_printf(f, "%d %02x:%02x:%lu ", pid,
- MAJOR(inode->i_sb->s_dev),
- MINOR(inode->i_sb->s_dev), inode->i_ino);
- } else {
- seq_printf(f, "%d <none>:0 ", pid);
- }
- if (flc->flc_flags & FL_POSIX) {
- if (fl->fl_end == OFFSET_MAX)
- seq_printf(f, "%Ld EOF\n", fl->fl_start);
- else
- seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
- } else {
- seq_puts(f, "0 EOF\n");
- }
- }
- static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node)
- {
- struct file_lock_core *tmp;
- /* NULL node or root node */
- if (node == NULL || node->flc_blocker == NULL)
- return NULL;
- /* Next member in the linked list could be itself */
- tmp = list_next_entry(node, flc_blocked_member);
- if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests,
- flc_blocked_member)
- || tmp == node) {
- return NULL;
- }
- return tmp;
- }
- static int locks_show(struct seq_file *f, void *v)
- {
- struct locks_iterator *iter = f->private;
- struct file_lock_core *cur, *tmp;
- struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
- int level = 0;
- cur = hlist_entry(v, struct file_lock_core, flc_link);
- if (locks_translate_pid(cur, proc_pidns) == 0)
- return 0;
- /* View this crossed linked list as a binary tree, the first member of flc_blocked_requests
- * is the left child of current node, the next silibing in flc_blocked_member is the
- * right child, we can alse get the parent of current node from flc_blocker, so this
- * question becomes traversal of a binary tree
- */
- while (cur != NULL) {
- if (level)
- lock_get_status(f, cur, iter->li_pos, "-> ", level);
- else
- lock_get_status(f, cur, iter->li_pos, "", level);
- if (!list_empty(&cur->flc_blocked_requests)) {
- /* Turn left */
- cur = list_first_entry_or_null(&cur->flc_blocked_requests,
- struct file_lock_core,
- flc_blocked_member);
- level++;
- } else {
- /* Turn right */
- tmp = get_next_blocked_member(cur);
- /* Fall back to parent node */
- while (tmp == NULL && cur->flc_blocker != NULL) {
- cur = cur->flc_blocker;
- level--;
- tmp = get_next_blocked_member(cur);
- }
- cur = tmp;
- }
- }
- return 0;
- }
- static void __show_fd_locks(struct seq_file *f,
- struct list_head *head, int *id,
- struct file *filp, struct files_struct *files)
- {
- struct file_lock_core *fl;
- list_for_each_entry(fl, head, flc_list) {
- if (filp != fl->flc_file)
- continue;
- if (fl->flc_owner != files && fl->flc_owner != filp)
- continue;
- (*id)++;
- seq_puts(f, "lock:\t");
- lock_get_status(f, fl, *id, "", 0);
- }
- }
- void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files)
- {
- struct inode *inode = file_inode(filp);
- struct file_lock_context *ctx;
- int id = 0;
- ctx = locks_inode_context(inode);
- if (!ctx)
- return;
- spin_lock(&ctx->flc_lock);
- __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
- __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
- __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
- spin_unlock(&ctx->flc_lock);
- }
- static void *locks_start(struct seq_file *f, loff_t *pos)
- __acquires(&blocked_lock_lock)
- {
- struct locks_iterator *iter = f->private;
- iter->li_pos = *pos + 1;
- percpu_down_write(&file_rwsem);
- spin_lock(&blocked_lock_lock);
- return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
- }
- static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
- {
- struct locks_iterator *iter = f->private;
- ++iter->li_pos;
- return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
- }
- static void locks_stop(struct seq_file *f, void *v)
- __releases(&blocked_lock_lock)
- {
- spin_unlock(&blocked_lock_lock);
- percpu_up_write(&file_rwsem);
- }
- static const struct seq_operations locks_seq_operations = {
- .start = locks_start,
- .next = locks_next,
- .stop = locks_stop,
- .show = locks_show,
- };
- static int __init proc_locks_init(void)
- {
- proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
- sizeof(struct locks_iterator), NULL);
- return 0;
- }
- fs_initcall(proc_locks_init);
- #endif
- static int __init filelock_init(void)
- {
- int i;
- flctx_cache = kmem_cache_create("file_lock_ctx",
- sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
- filelock_cache = kmem_cache_create("file_lock_cache",
- sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
- filelease_cache = kmem_cache_create("file_lease_cache",
- sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
- for_each_possible_cpu(i) {
- struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
- spin_lock_init(&fll->lock);
- INIT_HLIST_HEAD(&fll->hlist);
- }
- lease_notifier_chain_init();
- return 0;
- }
- core_initcall(filelock_init);
|