space-info.c

// SPDX-License-Identifier: GPL-2.0

#include "linux/spinlock.h"
#include <linux/minmax.h>
#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a
 *   space_info for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0; if it is we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order
 *   to reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these
 *     operations into a single operation done on demand.  These are an easy
 *     way to reclaim metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is
 *     reserved for delayed allocation.  We can reclaim some of this space
 *     simply by running delalloc, but usually we need to wait for ordered
 *     extents to reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and
 *     every delayed ref operation holds a reservation.  Running these is a
 *     quick way to reclaim space, but we want to hold this until the end
 *     because COW can churn a lot and we can avoid making some extent tree
 *     modifications if we are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was
 *     born out of a pre-tickets era where we could end up committing the
 *     transaction thousands of times in a row without making progress.  Now
 *     thanks to our ticketing system we know if we're not making progress and
 *     can error everybody out after a few commits rather than burning the disk
 *     hoping for a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If
 *   there is no unallocated space to be had, all reservations are kept within
 *   the free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the
 *   right thing with or without extra unallocated space.
 */

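/*
 * Sum the space actually consumed in @s_info, i.e. all of the bytes_
 * counters; @may_use_included controls whether outstanding reservations
 * (bytes_may_use) are counted as well.
 */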
u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percent) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH	(75)

#define BTRFS_UNALLOC_BLOCK_GROUP_TARGET	(10ULL)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

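/*
 * Allocate and initialize a new space_info for the given block group type
 * flags, publish it via sysfs and link it into fs_info->space_info.
 */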
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	space_info->fs_info = info;
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

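/*
 * Create the initial space_infos at mount time: SYSTEM first, then either a
 * single mixed DATA+METADATA space_info or separate METADATA and DATA ones,
 * depending on the MIXED_GROUPS incompat feature.
 */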
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

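/*
 * Account a freshly added block group in its space_info: bump the totals by
 * the block group's length (scaled by the raid factor for the on-disk
 * counters), retry any waiting tickets and link the block group into the
 * per-raid-index list.
 */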
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group)
{
	struct btrfs_space_info *found;
	int factor, index;

	factor = btrfs_bg_type_to_factor(block_group->flags);

	found = btrfs_find_space_info(info, block_group->flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += block_group->length;
	found->disk_total += block_group->length * factor;
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);

	block_group->space_info = found;

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	down_write(&found->groups_sem);
	list_add_tail(&block_group->list, &found->block_groups[index]);
	up_write(&found->groups_sem);
}

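/* Find the space_info matching the type bits in @flags, or NULL if none. */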
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *data_sinfo;
	u64 data_chunk_size;

	/*
	 * Calculate the data_chunk_size: space_info->chunk_size is the
	 * "optimal" chunk size based on the fs size.  However when we actually
	 * allocate the chunk we will strip this down further, making it no
	 * more than 10% of the disk or 1G, whichever is smaller.
	 *
	 * In zoned mode, we need to use zone_size (= data_sinfo->chunk_size)
	 * as it is.
	 */
	data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	if (btrfs_is_zoned(fs_info))
		return data_sinfo->chunk_size;
	data_chunk_size = min(data_sinfo->chunk_size,
			      mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
	return min_t(u64, data_chunk_size, SZ_1G);
}

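/*
 * Estimate how much currently unallocated space could still back an
 * overcommitted reservation for @space_info, given the flush level.  This is
 * deliberately conservative: it scales by the raid profile factor, holds back
 * one effective data chunk, and only offers a fraction of what remains.
 */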
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     const struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	u64 data_chunk_size;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);
	if (avail == 0)
		return 0;

	data_chunk_size = calc_effective_data_chunk_size(fs_info);

	/*
	 * Since data allocations immediately use block groups as part of the
	 * reservation, because we assume that data reservations will == actual
	 * usage, we could potentially overcommit and then immediately have that
	 * available space used by a data allocation, which could put us in a
	 * bind when we get close to filling the file system.
	 *
	 * To handle this simply remove the data_chunk_size from the available
	 * space.  If we are relatively empty this won't affect our ability to
	 * overcommit much, and if we're very close to full it'll keep us from
	 * getting into a position where we've given ourselves very little
	 * metadata wiggle room.
	 */
	if (avail <= data_chunk_size)
		return 0;
	avail -= data_chunk_size;

	/*
	 * If we aren't flushing all things, let us overcommit up to half of
	 * the space.  If we can flush, don't let us overcommit too much, let
	 * it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	/*
	 * In zoned mode, we always allocate one zone as one chunk.  Returning
	 * non-zone-size-aligned bytes here will result in less pressure for
	 * the async metadata reclaim process, and it will over-commit too much
	 * leading to ENOSPC.  Align down to the zone size to avoid that.
	 */
	if (btrfs_is_zoned(fs_info))
		avail = ALIGN_DOWN(avail, fs_info->zone_size);

	return avail;
}

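/*
 * Decide whether a metadata reservation of @bytes may overcommit the
 * currently allocated space.  Returns 1 if used + bytes still fits into
 * total_bytes plus the estimated available unallocated space, 0 otherwise
 * (always 0 for DATA, which never overcommits).  For illustration, a caller
 * would typically do something like:
 *
 *	if (!btrfs_can_overcommit(fs_info, space_info, num_bytes, flush))
 *		... queue a ticket and start flushing ...
 */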
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 const struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

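/*
 * Unlink @ticket from its ticket list and drop its bytes from the pending
 * reclaim_size, if it is still queued.
 */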
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

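/* Print the size and reserved bytes of a named block reserve under its lock. */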
#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
	switch (space_info->flags) {
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "SYSTEM";
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "DATA+METADATA";
	case BTRFS_BLOCK_GROUP_DATA:
		return "DATA";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "METADATA";
	default:
		return "UNKNOWN";
	}
}

static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info,
				    const struct btrfs_space_info *info)
{
	const char *flag_str = space_info_flag_to_str(info);

	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
		   flag_str,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		   "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		   info->total_bytes, info->bytes_used, info->bytes_pinned,
		   info->bytes_reserved, info->bytes_may_use,
		   info->bytes_readonly, info->bytes_zone_unusable);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	u64 total_avail = 0;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	dump_global_block_rsv(fs_info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		u64 avail;

		spin_lock(&cache->lock);
		avail = cache->length - cache->used - cache->pinned -
			cache->reserved - cache->bytes_super - cache->zone_unusable;
		btrfs_info(fs_info,
			   "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
			   cache->start, cache->length, cache->used, cache->pinned,
			   cache->reserved, cache->delalloc_bytes,
			   cache->bytes_super, cache->zone_unusable,
			   avail, cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
		total_avail += avail;
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);

	btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
}

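/*
 * Convert @to_reclaim bytes into a number of items, using the worst case
 * metadata size of inserting one item; always returns at least 1.
 */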
static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

/*
 * Shrink metadata reservation for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calculate the number of pages we need to flush for space reservation. */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  What we really want to do is reclaim full inode's
		 * worth of reservations, however that's not available to us
		 * here.  We will take a fraction of the delalloc bytes for our
		 * flushing loops and hope for the best.  Delalloc will expand
		 * the amount we write to cover an entire dirty extent, which
		 * will reclaim the metadata reservation for that range.  If
		 * it's not enough subsequent flush stages will be more
		 * aggressive.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}

	trans = current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode is
		 * marked clean.  We don't use filemap_fdatawrite() for flushing
		 * because we want to control how many pages we write out at a
		 * time, thus this is the only safe way to make sure we've
		 * waited for outstanding compressed workers to have started
		 * their jobs and thus have ordered extents set up properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want to
		 * start the IO on everybody, and then come back here and wait
		 * for all of the async work to catch up.  Once we're done with
		 * that we know we'll have ordered extents for everything and we
		 * can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async case,
		 * as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote fewer pages in
		 * this loop than we have outstanding, only wait for that
		 * number of pages, otherwise we can wait for all async pages
		 * to finish before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, NULL);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state.  This is only advisory
 * and may fail for various reasons.  The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction_nostart(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction_nostart(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			btrfs_run_delayed_refs(trans, num_bytes);
		else
			btrfs_run_delayed_refs(trans, 0);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		/*
		 * We don't want to start a new transaction, just attach to the
		 * current one or wait for it to fully commit in case its
		 * commit is happening at the moment.  Note: we don't use a
		 * nostart join because that does not wait for a transaction to
		 * fully commit (only for it to be unblocked, state
		 * TRANS_STATE_UNBLOCKED).
		 */
		ret = btrfs_commit_current_transaction(root);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
}

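/*
 * Compute how much metadata space the flusher should try to reclaim: the
 * bytes backing queued tickets, plus any overage in case we are already
 * overcommitted beyond what the free space can cover.
 */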
static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
					    const struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}

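/*
 * Decide whether background preemptive flushing is worthwhile.  Returns true
 * only if we are not nearly full, the reclaimable reservations exceed the
 * clamped free-space threshold computed below, and no tickets are already
 * driving the regular flusher.
 */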
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    const struct btrfs_space_info *space_info)
{
	const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
	u64 ordered, delalloc;
	u64 thresh;
	u64 used;

	thresh = mult_perc(space_info->total_bytes, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 *   Amount of RAM	Minimum threshold	Maximum threshold
	 *
	 *	256GiB			1GiB		128GiB
	 *	128GiB			512MiB		64GiB
	 *	64GiB			256MiB		32GiB
	 *	32GiB			128MiB		16GiB
	 *	16GiB			64MiB		8GiB
	 *
	 * These are the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */
	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
			btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

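/*
 * Try to satisfy @ticket directly from the global block reserve.  Only
 * allowed for tickets with ->steal set, and only if the global rsv would
 * keep at least 10% of its size after the carve-out.
 */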
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = mult_perc(global_rsv->size, 10);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * We've exhausted our flushing, start failing tickets.
 *
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			btrfs_block_rsv_reserved(delayed_block_rsv) +
			btrfs_block_rsv_reserved(delayed_refs_rsv) +
			btrfs_block_rsv_reserved(trans_rsv);
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (btrfs_block_rsv_reserved(delayed_block_rsv) +
			    btrfs_block_rsv_reserved(delayed_refs_rsv))) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
			   btrfs_block_rsv_reserved(delayed_refs_rsv)) {
			to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
			flush = FLUSH_DELAYED_REFS_NR;
		}
		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim one item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately reusable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;
		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non priority tickets on the list.  We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/*
	 * Attempt to steal from the global rsv if we can, except if the fs was
	 * turned into error mode due to a transaction abort when flushing space
	 * above.  In that case fail with the abort error instead of returning
	 * success to the caller, even if we could steal from the global rsv -
	 * this is just to have the caller fail immediately instead of later
	 * when trying to modify the fs, making it easier to debug -ENOSPC
	 * problems.
	 */
	if (BTRFS_FS_ERROR(fs_info)) {
		ticket->error = BTRFS_FS_ERROR(fs_info);
		remove_ticket(space_info, ticket);
	} else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/* We could have been granted before we got here. */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}
/*
 * Do the appropriate flushing and waiting for a ticket.
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}
/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us.  We
	 * need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes.  If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}
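
/*
 * Worked example (illustrative numbers): with delalloc_bytes at 1GiB and
 * ordered_bytes at 256MiB, dirtying writers are outpacing ordered extent
 * completion, so the clamp is bumped by one, capped at 8.  Since the clamp
 * value is applied as a right shift to the preemptive flushing threshold,
 * each increment halves that threshold and makes the preemptive worker kick
 * in earlier.
 */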
static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}

/*
 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want
 * to fail as quickly as possible.
 */
static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
{
	return (flush != BTRFS_RESERVE_NO_FLUSH &&
		flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
}
/*
 * Try to reserve bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = -ENOSPC;
	bool pending_tickets;

	ASSERT(orig_bytes);
	/*
	 * If we have a transaction handle (current->journal_info != NULL), then
	 * the flush method can be neither BTRFS_RESERVE_FLUSH_ALL* nor
	 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
	 * flushing methods can trigger transaction commits.
	 */
	if (current->journal_info) {
		/* One assert per line for easier debugging. */
		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
		ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
		ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
	}

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * Things are dire, we need to make a reservation so we don't abort.  We
	 * will let this reservation go through as long as we have actual space
	 * left to allocate for the block.
	 */
	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
		used = btrfs_space_info_used(space_info, false);
		if (used + orig_bytes <= space_info->total_bytes) {
			btrfs_space_info_update_bytes_may_use(fs_info, space_info,
							      orig_bytes);
			ret = 0;
		}
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && can_ticket(flush)) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up.  Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || !can_ticket(flush))
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}
/*
 * Try to reserve metadata bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @space_info: the space_info we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      space_info->flags, orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
	}
	return ret;
}
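
/*
 * Illustrative caller pattern (editor's sketch; the surrounding context is
 * assumed, not taken from this file): a typical metadata reservation sizes
 * the request from a number of tree items and picks a flush mode suited to
 * its locking context, relying on the guarantee that nothing is reserved
 * when an error is returned:
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, nr_items);
 *	int ret = btrfs_reserve_metadata_bytes(fs_info, space_info, bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 */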
/*
 * Try to reserve data bytes for an allocation.
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
	       flush == BTRFS_RESERVE_NO_FLUSH);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
/* Dump all the space infos when we abort a transaction due to ENOSPC. */
__cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	btrfs_info(fs_info, "dumping space info:");
	list_for_each_entry(space_info, &fs_info->space_info, list) {
		spin_lock(&space_info->lock);
		__btrfs_dump_space_info(fs_info, space_info);
		spin_unlock(&space_info->lock);
	}
	dump_global_block_rsv(fs_info);
}
/*
 * Account the unused space of all the readonly block groups in the
 * space_info, taking mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		factor = btrfs_bg_type_to_factor(block_group->flags);
		free_bytes += (block_group->length -
			       block_group->used) * factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}
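
/*
 * Example (illustrative numbers): a read-only RAID1 block group with a 1GiB
 * length and 400MiB used contributes (1GiB - 400MiB) * 2 = 1.2GiB, because
 * each logical byte in a RAID1 group occupies two physical copies (factor 2
 * from btrfs_bg_type_to_factor()).
 */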
static u64 calc_pct_ratio(u64 x, u64 y)
{
	u64 product;
	int err;

	if (!y)
		return 0;
again:
	/*
	 * Multiply into a temporary so that, on overflow, the retry below
	 * shifts the original x rather than the wrapped product.
	 */
	err = check_mul_overflow(100, x, &product);
	if (err)
		goto lose_precision;
	return div64_u64(product, y);
lose_precision:
	x >>= 10;
	y >>= 10;
	if (!y)
		y = 1;
	goto again;
}
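
/*
 * Worked example: calc_pct_ratio(1ULL << 60, 1ULL << 62).  100 * 2^60 is
 * roughly 2^66.6 and overflows a u64, so both operands are shifted down by
 * 10 bits and the multiplication retried:
 *
 *	x = 1ULL << 50, y = 1ULL << 52
 *	100 * x = 100ULL << 50		(roughly 2^56.6, fits)
 *	ratio   = (100ULL << 50) / (1ULL << 52) = 25
 *
 * The shift trades the low 10 bits of precision for overflow safety, and
 * the y-to-1 clamp avoids a division by zero when y is tiny.
 */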
/*
 * A reasonable buffer for unallocated space is 10 data block_groups.
 * If we claw this back repeatedly, we can still achieve efficient
 * utilization when near full, and not do too much reclaim while
 * always maintaining a solid buffer for workloads that quickly
 * allocate and pressure the unallocated space.
 */
static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info)
{
	u64 chunk_sz = calc_effective_data_chunk_size(fs_info);

	return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz;
}
/*
 * The fundamental goal of automatic reclaim is to protect the filesystem's
 * unallocated space and thus minimize the probability of the filesystem going
 * read only when a metadata allocation failure causes a transaction abort.
 *
 * However, relocations happen into the space_info's unused space, therefore
 * automatic reclaim must also back off as that space runs low.  There is no
 * value in doing trivial "relocations" of re-writing the same block group
 * into a fresh one.
 *
 * Furthermore, we want to avoid doing too much reclaim even if there are good
 * candidates.  This is because the allocator is pretty good at filling up the
 * holes with writes.  So we want to do just enough reclaim to try and stay
 * safe from running out of unallocated space but not be wasteful about it.
 *
 * Therefore, the dynamic reclaim threshold is calculated as follows:
 * - calculate a target unallocated amount of 10 block group sized chunks
 * - ratchet up the intensity of reclaim depending on how far we are from
 *   that target by using a formula of unalloc / target to set the threshold.
 *
 * Typically with 10 block groups as the target, the discrete values this comes
 * out to are 0, 10, 20, ... , 80, 90, and 99.
 */
static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
	u64 target = calc_unalloc_target(fs_info);
	u64 alloc = space_info->total_bytes;
	u64 used = btrfs_space_info_used(space_info, false);
	u64 unused = alloc - used;
	u64 want = target > unalloc ? target - unalloc : 0;
	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);

	/* If we have no unused space, don't bother, it won't work anyway. */
	if (unused < data_chunk_size)
		return 0;

	/* Cast to int is OK because want <= target. */
	return calc_pct_ratio(want, target);
}
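
/*
 * Worked example (illustrative numbers): with a 1GiB effective data chunk
 * size the target is 10GiB of unallocated space.  If only 4GiB remains
 * unallocated, want = 10GiB - 4GiB = 6GiB and the threshold is
 * calc_pct_ratio(6GiB, 10GiB) = 60, so block groups less than 60% used
 * become reclaim candidates.  At or above the target, want = 0 and the
 * threshold is 0, disabling automatic reclaim entirely.
 */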
int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info)
{
	lockdep_assert_held(&space_info->lock);

	if (READ_ONCE(space_info->dynamic_reclaim))
		return calc_dynamic_reclaim_threshold(space_info);
	return READ_ONCE(space_info->bg_reclaim_threshold);
}
/*
 * Under "urgent" reclaim, we will reclaim even fresh block groups that have
 * recently seen successful allocations, as we are desperate to reclaim
 * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs.
 */
static bool is_reclaim_urgent(struct btrfs_space_info *space_info)
{
	struct btrfs_fs_info *fs_info = space_info->fs_info;
	u64 unalloc = atomic64_read(&fs_info->free_chunk_space);
	u64 data_chunk_size = calc_effective_data_chunk_size(fs_info);

	return unalloc < data_chunk_size;
}
static void do_reclaim_sweep(const struct btrfs_fs_info *fs_info,
			     struct btrfs_space_info *space_info, int raid)
{
	struct btrfs_block_group *bg;
	int thresh_pct;
	bool try_again = true;
	bool urgent;

	spin_lock(&space_info->lock);
	urgent = is_reclaim_urgent(space_info);
	thresh_pct = btrfs_calc_reclaim_threshold(space_info);
	spin_unlock(&space_info->lock);

	down_read(&space_info->groups_sem);
again:
	list_for_each_entry(bg, &space_info->block_groups[raid], list) {
		u64 thresh;
		bool reclaim = false;

		btrfs_get_block_group(bg);
		spin_lock(&bg->lock);
		thresh = mult_perc(bg->length, thresh_pct);
		if (bg->used < thresh && bg->reclaim_mark) {
			try_again = false;
			reclaim = true;
		}
		bg->reclaim_mark++;
		spin_unlock(&bg->lock);
		if (reclaim)
			btrfs_mark_bg_to_reclaim(bg);
		btrfs_put_block_group(bg);
	}

	/*
	 * In situations where we are very motivated to reclaim (low unalloc)
	 * use two passes to make the reclaim mark check best effort.
	 *
	 * If we have any staler groups, we don't touch the fresher ones, but
	 * if we really need a block group, do take a fresh one.
	 */
	if (try_again && urgent) {
		try_again = false;
		goto again;
	}

	up_read(&space_info->groups_sem);
}
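
/*
 * Worked example (illustrative numbers): with thresh_pct = 60, a 1GiB block
 * group gets thresh = ~614MiB, so a group using 300MiB qualifies, but on the
 * first pass only if its reclaim_mark is already non-zero, i.e. it was seen
 * by an earlier sweep.  Because the first pass increments every group's
 * mark, the urgent second pass will also take groups that were still fresh
 * a moment ago.
 */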
void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes)
{
	u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info);

	lockdep_assert_held(&space_info->lock);
	space_info->reclaimable_bytes += bytes;

	if (space_info->reclaimable_bytes >= chunk_sz)
		btrfs_set_periodic_reclaim_ready(space_info, true);
}
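
/*
 * Example (illustrative): with a 1GiB effective data chunk size, the
 * periodic sweep is armed only once at least 1GiB has been freed back into
 * this space_info; smaller trickles of frees leave periodic_reclaim_ready
 * false and the sweep idle.
 */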
void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready)
{
	lockdep_assert_held(&space_info->lock);

	if (!READ_ONCE(space_info->periodic_reclaim))
		return;
	if (ready != space_info->periodic_reclaim_ready) {
		space_info->periodic_reclaim_ready = ready;
		if (!ready)
			space_info->reclaimable_bytes = 0;
	}
}
bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info)
{
	bool ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return false;
	if (!READ_ONCE(space_info->periodic_reclaim))
		return false;

	spin_lock(&space_info->lock);
	ret = space_info->periodic_reclaim_ready;
	btrfs_set_periodic_reclaim_ready(space_info, false);
	spin_unlock(&space_info->lock);

	return ret;
}
void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
{
	int raid;
	struct btrfs_space_info *space_info;

	list_for_each_entry(space_info, &fs_info->space_info, list) {
		if (!btrfs_should_periodic_reclaim(space_info))
			continue;
		for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++)
			do_reclaim_sweep(fs_info, space_info, raid);
	}
}