  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/ext4/resize.c
  4. *
  5. * Support for resizing an ext4 filesystem while it is mounted.
  6. *
  7. * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
  8. *
  9. * This could probably be made into a module, because it is not often in use.
  10. */
  11. #define EXT4FS_DEBUG
  12. #include <linux/errno.h>
  13. #include <linux/slab.h>
  14. #include "ext4_jbd2.h"
  15. struct ext4_rcu_ptr {
  16. struct rcu_head rcu;
  17. void *ptr;
  18. };
  19. static void ext4_rcu_ptr_callback(struct rcu_head *head)
  20. {
  21. struct ext4_rcu_ptr *ptr;
  22. ptr = container_of(head, struct ext4_rcu_ptr, rcu);
  23. kvfree(ptr->ptr);
  24. kfree(ptr);
  25. }
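/*
 * ext4_kvfree_array_rcu() frees @to_free with kvfree() only after an RCU
 * grace period has elapsed, so readers still walking the old array under
 * rcu_read_lock() remain safe. If the small wrapper allocation fails, it
 * falls back to synchronize_rcu() and frees immediately.
 */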
  26. void ext4_kvfree_array_rcu(void *to_free)
  27. {
  28. struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
  29. if (ptr) {
  30. ptr->ptr = to_free;
  31. call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
  32. return;
  33. }
  34. synchronize_rcu();
  35. kvfree(to_free);
  36. }
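/*
 * ext4_resize_begin()/ext4_resize_end() serialize resizers: the
 * EXT4_FLAGS_RESIZING bit is taken here (returning -EBUSY if a resize is
 * already in progress) and released again by ext4_resize_end().
 */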
  37. int ext4_resize_begin(struct super_block *sb)
  38. {
  39. struct ext4_sb_info *sbi = EXT4_SB(sb);
  40. int ret = 0;
  41. if (!capable(CAP_SYS_RESOURCE))
  42. return -EPERM;
  43. /*
  44. * If we are not using the primary superblock/GDT copy don't resize,
  45. * because the user tools have no way of handling this. Probably a
  46. * bad time to do it anyways.
  47. */
  48. if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
  49. le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
  50. ext4_warning(sb, "won't resize using backup superblock at %llu",
  51. (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
  52. return -EPERM;
  53. }
  54. /*
  55. * We are not allowed to do online-resizing on a filesystem mounted
  56. * with error, because it can destroy the filesystem easily.
  57. */
  58. if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
  59. ext4_warning(sb, "There are errors in the filesystem, "
  60. "so online resizing is not allowed");
  61. return -EPERM;
  62. }
  63. if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
  64. &EXT4_SB(sb)->s_ext4_flags))
  65. ret = -EBUSY;
  66. return ret;
  67. }
  68. void ext4_resize_end(struct super_block *sb)
  69. {
  70. clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
  71. smp_mb__after_atomic();
  72. }
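/*
 * Layout helpers: the first group and first block of the meta block group
 * containing @group, and the per-group overhead (group descriptor blocks,
 * plus the superblock backup and reserved GDT blocks for groups that carry
 * a superblock copy).
 */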
  73. static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
  74. ext4_group_t group) {
  75. return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
  76. EXT4_DESC_PER_BLOCK_BITS(sb);
  77. }
  78. static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
  79. ext4_group_t group) {
  80. group = ext4_meta_bg_first_group(sb, group);
  81. return ext4_group_first_block_no(sb, group);
  82. }
  83. static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
  84. ext4_group_t group) {
  85. ext4_grpblk_t overhead;
  86. overhead = ext4_bg_num_gdb(sb, group);
  87. if (ext4_bg_has_super(sb, group))
  88. overhead += 1 +
  89. le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
  90. return overhead;
  91. }
  92. #define outside(b, first, last) ((b) < (first) || (b) >= (last))
  93. #define inside(b, first, last) ((b) >= (first) && (b) < (last))
  94. static int verify_group_input(struct super_block *sb,
  95. struct ext4_new_group_data *input)
  96. {
  97. struct ext4_sb_info *sbi = EXT4_SB(sb);
  98. struct ext4_super_block *es = sbi->s_es;
  99. ext4_fsblk_t start = ext4_blocks_count(es);
  100. ext4_fsblk_t end = start + input->blocks_count;
  101. ext4_group_t group = input->group;
  102. ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
  103. unsigned overhead;
  104. ext4_fsblk_t metaend;
  105. struct buffer_head *bh = NULL;
  106. ext4_grpblk_t free_blocks_count, offset;
  107. int err = -EINVAL;
  108. if (group != sbi->s_groups_count) {
  109. ext4_warning(sb, "Cannot add at group %u (only %u groups)",
  110. input->group, sbi->s_groups_count);
  111. return -EINVAL;
  112. }
  113. overhead = ext4_group_overhead_blocks(sb, group);
  114. metaend = start + overhead;
  115. input->free_clusters_count = free_blocks_count =
  116. input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
  117. if (test_opt(sb, DEBUG))
  118. printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
  119. "(%d free, %u reserved)\n",
  120. ext4_bg_has_super(sb, input->group) ? "normal" :
  121. "no-super", input->group, input->blocks_count,
  122. free_blocks_count, input->reserved_blocks);
  123. ext4_get_group_no_and_offset(sb, start, NULL, &offset);
  124. if (offset != 0)
  125. ext4_warning(sb, "Last group not full");
  126. else if (input->reserved_blocks > input->blocks_count / 5)
  127. ext4_warning(sb, "Reserved blocks too high (%u)",
  128. input->reserved_blocks);
  129. else if (free_blocks_count < 0)
  130. ext4_warning(sb, "Bad blocks count %u",
  131. input->blocks_count);
  132. else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
  133. err = PTR_ERR(bh);
  134. bh = NULL;
  135. ext4_warning(sb, "Cannot read last block (%llu)",
  136. end - 1);
  137. } else if (outside(input->block_bitmap, start, end))
  138. ext4_warning(sb, "Block bitmap not in group (block %llu)",
  139. (unsigned long long)input->block_bitmap);
  140. else if (outside(input->inode_bitmap, start, end))
  141. ext4_warning(sb, "Inode bitmap not in group (block %llu)",
  142. (unsigned long long)input->inode_bitmap);
  143. else if (outside(input->inode_table, start, end) ||
  144. outside(itend - 1, start, end))
  145. ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
  146. (unsigned long long)input->inode_table, itend - 1);
  147. else if (input->inode_bitmap == input->block_bitmap)
  148. ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
  149. (unsigned long long)input->block_bitmap);
  150. else if (inside(input->block_bitmap, input->inode_table, itend))
  151. ext4_warning(sb, "Block bitmap (%llu) in inode table "
  152. "(%llu-%llu)",
  153. (unsigned long long)input->block_bitmap,
  154. (unsigned long long)input->inode_table, itend - 1);
  155. else if (inside(input->inode_bitmap, input->inode_table, itend))
  156. ext4_warning(sb, "Inode bitmap (%llu) in inode table "
  157. "(%llu-%llu)",
  158. (unsigned long long)input->inode_bitmap,
  159. (unsigned long long)input->inode_table, itend - 1);
  160. else if (inside(input->block_bitmap, start, metaend))
  161. ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
  162. (unsigned long long)input->block_bitmap,
  163. start, metaend - 1);
  164. else if (inside(input->inode_bitmap, start, metaend))
  165. ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
  166. (unsigned long long)input->inode_bitmap,
  167. start, metaend - 1);
  168. else if (inside(input->inode_table, start, metaend) ||
  169. inside(itend - 1, start, metaend))
  170. ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
  171. "(%llu-%llu)",
  172. (unsigned long long)input->inode_table,
  173. itend - 1, start, metaend - 1);
  174. else
  175. err = 0;
  176. brelse(bh);
  177. return err;
  178. }
  179. /*
  180. * ext4_new_flex_group_data is used by the 64bit-resize interface to add one
  181. * flex group at a time.
  182. */
  183. struct ext4_new_flex_group_data {
  184. struct ext4_new_group_data *groups; /* new_group_data for groups
  185. in the flex group */
  186. __u16 *bg_flags; /* block group flags of groups
  187. in @groups */
  188. ext4_group_t count; /* number of groups in @groups
  189. */
  190. };
  191. /*
  192. * alloc_flex_gd() allocates an ext4_new_flex_group_data with size of
  193. * @flexbg_size.
  194. *
  195. * Returns NULL on failure, otherwise the address of the allocated structure.
  196. */
  197. static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
  198. {
  199. struct ext4_new_flex_group_data *flex_gd;
  200. flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
  201. if (flex_gd == NULL)
  202. goto out3;
  203. if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
  204. goto out2;
  205. flex_gd->count = flexbg_size;
  206. flex_gd->groups = kmalloc_array(flexbg_size,
  207. sizeof(struct ext4_new_group_data),
  208. GFP_NOFS);
  209. if (flex_gd->groups == NULL)
  210. goto out2;
  211. flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
  212. GFP_NOFS);
  213. if (flex_gd->bg_flags == NULL)
  214. goto out1;
  215. return flex_gd;
  216. out1:
  217. kfree(flex_gd->groups);
  218. out2:
  219. kfree(flex_gd);
  220. out3:
  221. return NULL;
  222. }
  223. static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
  224. {
  225. kfree(flex_gd->bg_flags);
  226. kfree(flex_gd->groups);
  227. kfree(flex_gd);
  228. }
  229. /*
  230. * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
  231. * and inode tables for a flex group.
  232. *
  233. * This function is used by 64bit-resize. Note that this function allocates
  234. * group tables from the 1st group of groups contained by @flexgd, which may
  235. * be a partial of a flex group.
  236. *
  237. * @sb: super block of fs to which the groups belongs
  238. *
  239. * Returns 0 on a successful allocation of the metadata blocks in the
  240. * block group.
  241. */
  242. static int ext4_alloc_group_tables(struct super_block *sb,
  243. struct ext4_new_flex_group_data *flex_gd,
  244. int flexbg_size)
  245. {
  246. struct ext4_new_group_data *group_data = flex_gd->groups;
  247. ext4_fsblk_t start_blk;
  248. ext4_fsblk_t last_blk;
  249. ext4_group_t src_group;
  250. ext4_group_t bb_index = 0;
  251. ext4_group_t ib_index = 0;
  252. ext4_group_t it_index = 0;
  253. ext4_group_t group;
  254. ext4_group_t last_group;
  255. unsigned overhead;
  256. __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
  257. int i;
  258. BUG_ON(flex_gd->count == 0 || group_data == NULL);
  259. src_group = group_data[0].group;
  260. last_group = src_group + flex_gd->count - 1;
  261. BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
  262. (last_group & ~(flexbg_size - 1))));
  263. next_group:
  264. group = group_data[0].group;
  265. if (src_group >= group_data[0].group + flex_gd->count)
  266. return -ENOSPC;
  267. start_blk = ext4_group_first_block_no(sb, src_group);
  268. last_blk = start_blk + group_data[src_group - group].blocks_count;
  269. overhead = ext4_group_overhead_blocks(sb, src_group);
  270. start_blk += overhead;
  271. /* Collect as many contiguous blocks as possible. */
  272. src_group++;
  273. for (; src_group <= last_group; src_group++) {
  274. overhead = ext4_group_overhead_blocks(sb, src_group);
  275. if (overhead == 0)
  276. last_blk += group_data[src_group - group].blocks_count;
  277. else
  278. break;
  279. }
  280. /* Allocate block bitmaps */
  281. for (; bb_index < flex_gd->count; bb_index++) {
  282. if (start_blk >= last_blk)
  283. goto next_group;
  284. group_data[bb_index].block_bitmap = start_blk++;
  285. group = ext4_get_group_number(sb, start_blk - 1);
  286. group -= group_data[0].group;
  287. group_data[group].mdata_blocks++;
  288. flex_gd->bg_flags[group] &= uninit_mask;
  289. }
  290. /* Allocate inode bitmaps */
  291. for (; ib_index < flex_gd->count; ib_index++) {
  292. if (start_blk >= last_blk)
  293. goto next_group;
  294. group_data[ib_index].inode_bitmap = start_blk++;
  295. group = ext4_get_group_number(sb, start_blk - 1);
  296. group -= group_data[0].group;
  297. group_data[group].mdata_blocks++;
  298. flex_gd->bg_flags[group] &= uninit_mask;
  299. }
  300. /* Allocate inode tables */
  301. for (; it_index < flex_gd->count; it_index++) {
  302. unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
  303. ext4_fsblk_t next_group_start;
  304. if (start_blk + itb > last_blk)
  305. goto next_group;
  306. group_data[it_index].inode_table = start_blk;
  307. group = ext4_get_group_number(sb, start_blk);
  308. next_group_start = ext4_group_first_block_no(sb, group + 1);
  309. group -= group_data[0].group;
  310. if (start_blk + itb > next_group_start) {
  311. flex_gd->bg_flags[group + 1] &= uninit_mask;
  312. overhead = start_blk + itb - next_group_start;
  313. group_data[group + 1].mdata_blocks += overhead;
  314. itb -= overhead;
  315. }
  316. group_data[group].mdata_blocks += itb;
  317. flex_gd->bg_flags[group] &= uninit_mask;
  318. start_blk += EXT4_SB(sb)->s_itb_per_group;
  319. }
  320. /* Update free clusters count to exclude metadata blocks */
  321. for (i = 0; i < flex_gd->count; i++) {
  322. group_data[i].free_clusters_count -=
  323. EXT4_NUM_B2C(EXT4_SB(sb),
  324. group_data[i].mdata_blocks);
  325. }
  326. if (test_opt(sb, DEBUG)) {
  327. int i;
  328. group = group_data[0].group;
  329. printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
  330. "%d groups, flexbg size is %d:\n", flex_gd->count,
  331. flexbg_size);
  332. for (i = 0; i < flex_gd->count; i++) {
  333. ext4_debug(
  334. "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
  335. ext4_bg_has_super(sb, group + i) ? "normal" :
  336. "no-super", group + i,
  337. group_data[i].blocks_count,
  338. group_data[i].free_clusters_count,
  339. group_data[i].mdata_blocks);
  340. }
  341. }
  342. return 0;
  343. }
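/*
 * bclean() returns a zeroed buffer for block @blk with journal write access
 * already obtained, or an ERR_PTR() on failure.
 */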
  344. static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
  345. ext4_fsblk_t blk)
  346. {
  347. struct buffer_head *bh;
  348. int err;
  349. bh = sb_getblk(sb, blk);
  350. if (unlikely(!bh))
  351. return ERR_PTR(-ENOMEM);
  352. BUFFER_TRACE(bh, "get_write_access");
  353. if ((err = ext4_journal_get_write_access(handle, bh))) {
  354. brelse(bh);
  355. bh = ERR_PTR(err);
  356. } else {
  357. memset(bh->b_data, 0, sb->s_blocksize);
  358. set_buffer_uptodate(bh);
  359. }
  360. return bh;
  361. }
  362. /*
  363. * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
  364. * If that fails, restart the transaction & regain write access for the
  365. * buffer head which is used for block_bitmap modifications.
  366. */
  367. static int extend_or_restart_transaction(handle_t *handle, int thresh)
  368. {
  369. int err;
  370. if (ext4_handle_has_enough_credits(handle, thresh))
  371. return 0;
  372. err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
  373. if (err < 0)
  374. return err;
  375. if (err) {
  376. err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
  377. if (err)
  378. return err;
  379. }
  380. return 0;
  381. }
  382. /*
  383. * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster] used.
  384. *
  385. * Helper function for ext4_setup_new_group_blocks() which marks group table blocks in use.
  386. *
  387. * @sb: super block
  388. * @handle: journal handle
  389. * @flex_gd: flex group data
  390. */
  391. static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
  392. struct ext4_new_flex_group_data *flex_gd,
  393. ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
  394. {
  395. struct ext4_sb_info *sbi = EXT4_SB(sb);
  396. ext4_group_t count = last_cluster - first_cluster + 1;
  397. ext4_group_t count2;
  398. ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
  399. last_cluster);
  400. for (count2 = count; count > 0;
  401. count -= count2, first_cluster += count2) {
  402. ext4_fsblk_t start;
  403. struct buffer_head *bh;
  404. ext4_group_t group;
  405. int err;
  406. group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
  407. start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
  408. group -= flex_gd->groups[0].group;
  409. count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
  410. if (count2 > count)
  411. count2 = count;
  412. if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
  413. BUG_ON(flex_gd->count > 1);
  414. continue;
  415. }
  416. err = extend_or_restart_transaction(handle, 1);
  417. if (err)
  418. return err;
  419. bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
  420. if (unlikely(!bh))
  421. return -ENOMEM;
  422. BUFFER_TRACE(bh, "get_write_access");
  423. err = ext4_journal_get_write_access(handle, bh);
  424. if (err) {
  425. brelse(bh);
  426. return err;
  427. }
  428. ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
  429. first_cluster, first_cluster - start, count2);
  430. ext4_set_bits(bh->b_data, first_cluster - start, count2);
  431. err = ext4_handle_dirty_metadata(handle, NULL, bh);
  432. brelse(bh);
  433. if (unlikely(err))
  434. return err;
  435. }
  436. return 0;
  437. }
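/*
 * Indexes of the per-group metadata tables walked below; GROUP_TABLE_COUNT
 * bounds the group_table_count[] array in setup_new_flex_group_blocks().
 * This is an assumed definition, matching the {block bitmap, inode bitmap,
 * inode table} order of struct ext4_new_group_data that the code relies on.
 */
enum {
	BLOCK_BITMAP = 0,	/* block bitmap */
	INODE_BITMAP,		/* inode bitmap */
	INODE_TABLE,		/* inode tables */
	GROUP_TABLE_COUNT,
};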
  438. /*
  439. * Set up the block and inode bitmaps, and the inode table for the new groups.
  440. * This doesn't need to be part of the main transaction, since we are only
  441. * changing blocks outside the actual filesystem. We still do journaling to
  442. * ensure the recovery is correct in case of a failure just after resize.
  443. * If any part of this fails, we simply abort the resize.
  444. *
  445. * setup_new_flex_group_blocks handles a flex group as follows:
  446. * 1. copy super block and GDT, and initialize group tables if necessary.
  447. * In this step, we only set bits in blocks bitmaps for blocks taken by
  448. * super block and GDT.
  449. * 2. allocate group tables in block bitmaps, that is, set bits in block
  450. * bitmap for blocks taken by group tables.
  451. */
  452. static int setup_new_flex_group_blocks(struct super_block *sb,
  453. struct ext4_new_flex_group_data *flex_gd)
  454. {
  455. int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
  456. ext4_fsblk_t start;
  457. ext4_fsblk_t block;
  458. struct ext4_sb_info *sbi = EXT4_SB(sb);
  459. struct ext4_super_block *es = sbi->s_es;
  460. struct ext4_new_group_data *group_data = flex_gd->groups;
  461. __u16 *bg_flags = flex_gd->bg_flags;
  462. handle_t *handle;
  463. ext4_group_t group, count;
  464. struct buffer_head *bh = NULL;
  465. int reserved_gdb, i, j, err = 0, err2;
  466. int meta_bg;
  467. BUG_ON(!flex_gd->count || !group_data ||
  468. group_data[0].group != sbi->s_groups_count);
  469. reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
  470. meta_bg = ext4_has_feature_meta_bg(sb);
  471. /* This transaction may be extended/restarted along the way */
  472. handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
  473. if (IS_ERR(handle))
  474. return PTR_ERR(handle);
  475. group = group_data[0].group;
  476. for (i = 0; i < flex_gd->count; i++, group++) {
  477. unsigned long gdblocks;
  478. ext4_grpblk_t overhead;
  479. gdblocks = ext4_bg_num_gdb(sb, group);
  480. start = ext4_group_first_block_no(sb, group);
  481. if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
  482. goto handle_itb;
  483. if (meta_bg == 1) {
  484. ext4_group_t first_group;
  485. first_group = ext4_meta_bg_first_group(sb, group);
  486. if (first_group != group + 1 &&
  487. first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
  488. goto handle_itb;
  489. }
  490. block = start + ext4_bg_has_super(sb, group);
  491. /* Copy all of the GDT blocks into the backup in this group */
  492. for (j = 0; j < gdblocks; j++, block++) {
  493. struct buffer_head *gdb;
  494. ext4_debug("update backup group %#04llx\n", block);
  495. err = extend_or_restart_transaction(handle, 1);
  496. if (err)
  497. goto out;
  498. gdb = sb_getblk(sb, block);
  499. if (unlikely(!gdb)) {
  500. err = -ENOMEM;
  501. goto out;
  502. }
  503. BUFFER_TRACE(gdb, "get_write_access");
  504. err = ext4_journal_get_write_access(handle, gdb);
  505. if (err) {
  506. brelse(gdb);
  507. goto out;
  508. }
  509. memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
  510. s_group_desc, j)->b_data, gdb->b_size);
  511. set_buffer_uptodate(gdb);
  512. err = ext4_handle_dirty_metadata(handle, NULL, gdb);
  513. if (unlikely(err)) {
  514. brelse(gdb);
  515. goto out;
  516. }
  517. brelse(gdb);
  518. }
  519. /* Zero out all of the reserved backup group descriptor
  520. * table blocks
  521. */
  522. if (ext4_bg_has_super(sb, group)) {
  523. err = sb_issue_zeroout(sb, gdblocks + start + 1,
  524. reserved_gdb, GFP_NOFS);
  525. if (err)
  526. goto out;
  527. }
  528. handle_itb:
  529. /* Initialize group tables of the group @group */
  530. if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
  531. goto handle_bb;
  532. /* Zero out all of the inode table blocks */
  533. block = group_data[i].inode_table;
  534. ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
  535. block, sbi->s_itb_per_group);
  536. err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
  537. GFP_NOFS);
  538. if (err)
  539. goto out;
  540. handle_bb:
  541. if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
  542. goto handle_ib;
  543. /* Initialize block bitmap of the @group */
  544. block = group_data[i].block_bitmap;
  545. err = extend_or_restart_transaction(handle, 1);
  546. if (err)
  547. goto out;
  548. bh = bclean(handle, sb, block);
  549. if (IS_ERR(bh)) {
  550. err = PTR_ERR(bh);
  551. goto out;
  552. }
  553. overhead = ext4_group_overhead_blocks(sb, group);
  554. if (overhead != 0) {
  555. ext4_debug("mark backup superblock %#04llx (+0)\n",
  556. start);
  557. ext4_set_bits(bh->b_data, 0,
  558. EXT4_NUM_B2C(sbi, overhead));
  559. }
  560. ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
  561. sb->s_blocksize * 8, bh->b_data);
  562. err = ext4_handle_dirty_metadata(handle, NULL, bh);
  563. brelse(bh);
  564. if (err)
  565. goto out;
  566. handle_ib:
  567. if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
  568. continue;
  569. /* Initialize inode bitmap of the @group */
  570. block = group_data[i].inode_bitmap;
  571. err = extend_or_restart_transaction(handle, 1);
  572. if (err)
  573. goto out;
  574. /* Mark unused entries in inode bitmap used */
  575. bh = bclean(handle, sb, block);
  576. if (IS_ERR(bh)) {
  577. err = PTR_ERR(bh);
  578. goto out;
  579. }
  580. ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
  581. sb->s_blocksize * 8, bh->b_data);
  582. err = ext4_handle_dirty_metadata(handle, NULL, bh);
  583. brelse(bh);
  584. if (err)
  585. goto out;
  586. }
  587. /* Mark group tables in block bitmap */
  588. for (j = 0; j < GROUP_TABLE_COUNT; j++) {
  589. count = group_table_count[j];
  590. start = (&group_data[0].block_bitmap)[j];
  591. block = start;
  592. for (i = 1; i < flex_gd->count; i++) {
  593. block += group_table_count[j];
  594. if (block == (&group_data[i].block_bitmap)[j]) {
  595. count += group_table_count[j];
  596. continue;
  597. }
  598. err = set_flexbg_block_bitmap(sb, handle,
  599. flex_gd,
  600. EXT4_B2C(sbi, start),
  601. EXT4_B2C(sbi,
  602. start + count
  603. - 1));
  604. if (err)
  605. goto out;
  606. count = group_table_count[j];
  607. start = (&group_data[i].block_bitmap)[j];
  608. block = start;
  609. }
  610. if (count) {
  611. err = set_flexbg_block_bitmap(sb, handle,
  612. flex_gd,
  613. EXT4_B2C(sbi, start),
  614. EXT4_B2C(sbi,
  615. start + count
  616. - 1));
  617. if (err)
  618. goto out;
  619. }
  620. }
  621. out:
  622. err2 = ext4_journal_stop(handle);
  623. if (err2 && !err)
  624. err = err2;
  625. return err;
  626. }
  627. /*
  628. * Iterate through the groups which hold BACKUP superblock/GDT copies in an
  629. * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before
  630. * calling this for the first time. In a sparse filesystem it will be the
  631. * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
  632. * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
  633. */
  634. static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
  635. unsigned *five, unsigned *seven)
  636. {
  637. unsigned *min = three;
  638. int mult = 3;
  639. unsigned ret;
  640. if (!ext4_has_feature_sparse_super(sb)) {
  641. ret = *min;
  642. *min += 1;
  643. return ret;
  644. }
  645. if (*five < *min) {
  646. min = five;
  647. mult = 5;
  648. }
  649. if (*seven < *min) {
  650. min = seven;
  651. mult = 7;
  652. }
  653. ret = *min;
  654. *min *= mult;
  655. return ret;
  656. }
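/*
 * A minimal usage sketch (this is how verify_reserved_gdb() and
 * update_backups() below iterate the backup groups):
 *
 *	unsigned three = 1, five = 5, seven = 7;
 *	ext4_group_t grp;
 *
 *	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end)
 *		;	/* process backup group grp */
 */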
  657. /*
  658. * Check that all of the backup GDT blocks are held in the primary GDT block.
  659. * It is assumed that they are stored in group order. Returns the number of
  660. * groups in the current filesystem that have BACKUPS, or a negative error code.
  661. */
  662. static int verify_reserved_gdb(struct super_block *sb,
  663. ext4_group_t end,
  664. struct buffer_head *primary)
  665. {
  666. const ext4_fsblk_t blk = primary->b_blocknr;
  667. unsigned three = 1;
  668. unsigned five = 5;
  669. unsigned seven = 7;
  670. unsigned grp;
  671. __le32 *p = (__le32 *)primary->b_data;
  672. int gdbackups = 0;
  673. while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
  674. if (le32_to_cpu(*p++) !=
  675. grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
  676. ext4_warning(sb, "reserved GDT %llu"
  677. " missing grp %d (%llu)",
  678. blk, grp,
  679. grp *
  680. (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
  681. blk);
  682. return -EINVAL;
  683. }
  684. if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
  685. return -EFBIG;
  686. }
  687. return gdbackups;
  688. }
  689. /*
  690. * Called when we need to bring a reserved group descriptor table block into
  691. * use from the resize inode. The primary copy of the new GDT block currently
  692. * is an indirect block (under the double indirect block in the resize inode).
  693. * The new backup GDT blocks will be stored as leaf blocks in this indirect
  694. * block, in group order. Even though we know all the block numbers we need,
  695. * we check to ensure that the resize inode has actually reserved these blocks.
  696. *
  697. * Don't need to update the block bitmaps because the blocks are still in use.
  698. *
  699. * We get all of the error cases out of the way, so that we are sure to not
  700. * fail once we start modifying the data on disk, because JBD has no rollback.
  701. */
  702. static int add_new_gdb(handle_t *handle, struct inode *inode,
  703. ext4_group_t group)
  704. {
  705. struct super_block *sb = inode->i_sb;
  706. struct ext4_super_block *es = EXT4_SB(sb)->s_es;
  707. unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
  708. ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
  709. struct buffer_head **o_group_desc, **n_group_desc = NULL;
  710. struct buffer_head *dind = NULL;
  711. struct buffer_head *gdb_bh = NULL;
  712. int gdbackups;
  713. struct ext4_iloc iloc = { .bh = NULL };
  714. __le32 *data;
  715. int err;
  716. if (test_opt(sb, DEBUG))
  717. printk(KERN_DEBUG
  718. "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
  719. gdb_num);
  720. gdb_bh = ext4_sb_bread(sb, gdblock, 0);
  721. if (IS_ERR(gdb_bh))
  722. return PTR_ERR(gdb_bh);
  723. gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
  724. if (gdbackups < 0) {
  725. err = gdbackups;
  726. goto errout;
  727. }
  728. data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
  729. dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
  730. if (IS_ERR(dind)) {
  731. err = PTR_ERR(dind);
  732. dind = NULL;
  733. goto errout;
  734. }
  735. data = (__le32 *)dind->b_data;
  736. if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
  737. ext4_warning(sb, "new group %u GDT block %llu not reserved",
  738. group, gdblock);
  739. err = -EINVAL;
  740. goto errout;
  741. }
  742. BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
  743. err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
  744. if (unlikely(err))
  745. goto errout;
  746. BUFFER_TRACE(gdb_bh, "get_write_access");
  747. err = ext4_journal_get_write_access(handle, gdb_bh);
  748. if (unlikely(err))
  749. goto errout;
  750. BUFFER_TRACE(dind, "get_write_access");
  751. err = ext4_journal_get_write_access(handle, dind);
  752. if (unlikely(err)) {
  753. ext4_std_error(sb, err);
  754. goto errout;
  755. }
  756. /* ext4_reserve_inode_write() gets a reference on the iloc */
  757. err = ext4_reserve_inode_write(handle, inode, &iloc);
  758. if (unlikely(err))
  759. goto errout;
  760. n_group_desc = ext4_kvmalloc((gdb_num + 1) *
  761. sizeof(struct buffer_head *),
  762. GFP_NOFS);
  763. if (!n_group_desc) {
  764. err = -ENOMEM;
  765. ext4_warning(sb, "not enough memory for %lu groups",
  766. gdb_num + 1);
  767. goto errout;
  768. }
  769. /*
  770. * Finally, we have all of the possible failures behind us...
  771. *
  772. * Remove new GDT block from inode double-indirect block and clear out
  773. * the new GDT block for use (which also "frees" the backup GDT blocks
  774. * from the reserved inode). We don't need to change the bitmaps for
  775. * these blocks, because they are marked as in-use from being in the
  776. * reserved inode, and will become GDT blocks (primary and backup).
  777. */
  778. data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
  779. err = ext4_handle_dirty_metadata(handle, NULL, dind);
  780. if (unlikely(err)) {
  781. ext4_std_error(sb, err);
  782. goto errout;
  783. }
  784. inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
  785. (9 - EXT4_SB(sb)->s_cluster_bits);
  786. ext4_mark_iloc_dirty(handle, inode, &iloc);
  787. memset(gdb_bh->b_data, 0, sb->s_blocksize);
  788. err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
  789. if (unlikely(err)) {
  790. ext4_std_error(sb, err);
  791. iloc.bh = NULL;
  792. goto errout;
  793. }
  794. brelse(dind);
  795. rcu_read_lock();
  796. o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
  797. memcpy(n_group_desc, o_group_desc,
  798. EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
  799. rcu_read_unlock();
  800. n_group_desc[gdb_num] = gdb_bh;
  801. rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
  802. EXT4_SB(sb)->s_gdb_count++;
  803. ext4_kvfree_array_rcu(o_group_desc);
  804. le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
  805. err = ext4_handle_dirty_super(handle, sb);
  806. if (err)
  807. ext4_std_error(sb, err);
  808. return err;
  809. errout:
  810. kvfree(n_group_desc);
  811. brelse(iloc.bh);
  812. brelse(dind);
  813. brelse(gdb_bh);
  814. ext4_debug("leaving with error %d\n", err);
  815. return err;
  816. }
  817. /*
  818. * add_new_gdb_meta_bg is the sister of add_new_gdb: it adds a group descriptor block that lives in the new META_BG group itself rather than coming from the resize inode.
  819. */
  820. static int add_new_gdb_meta_bg(struct super_block *sb,
  821. handle_t *handle, ext4_group_t group) {
  822. ext4_fsblk_t gdblock;
  823. struct buffer_head *gdb_bh;
  824. struct buffer_head **o_group_desc, **n_group_desc;
  825. unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
  826. int err;
  827. gdblock = ext4_meta_bg_first_block_no(sb, group) +
  828. ext4_bg_has_super(sb, group);
  829. gdb_bh = ext4_sb_bread(sb, gdblock, 0);
  830. if (IS_ERR(gdb_bh))
  831. return PTR_ERR(gdb_bh);
  832. n_group_desc = ext4_kvmalloc((gdb_num + 1) *
  833. sizeof(struct buffer_head *),
  834. GFP_NOFS);
  835. if (!n_group_desc) {
  836. brelse(gdb_bh);
  837. err = -ENOMEM;
  838. ext4_warning(sb, "not enough memory for %lu groups",
  839. gdb_num + 1);
  840. return err;
  841. }
  842. rcu_read_lock();
  843. o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
  844. memcpy(n_group_desc, o_group_desc,
  845. EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
  846. rcu_read_unlock();
  847. n_group_desc[gdb_num] = gdb_bh;
  848. BUFFER_TRACE(gdb_bh, "get_write_access");
  849. err = ext4_journal_get_write_access(handle, gdb_bh);
  850. if (err) {
  851. kvfree(n_group_desc);
  852. brelse(gdb_bh);
  853. return err;
  854. }
  855. rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
  856. EXT4_SB(sb)->s_gdb_count++;
  857. ext4_kvfree_array_rcu(o_group_desc);
  858. return err;
  859. }
  860. /*
  861. * Called when we are adding a new group which has a backup copy of each of
  862. * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
  863. * We need to add these reserved backup GDT blocks to the resize inode, so
  864. * that they are kept for future resizing and not allocated to files.
  865. *
  866. * Each reserved backup GDT block will go into a different indirect block.
  867. * The indirect blocks are actually the primary reserved GDT blocks,
  868. * so we know in advance what their block numbers are. We only get the
  869. * double-indirect block to verify it is pointing to the primary reserved
  870. * GDT blocks so we don't overwrite a data block by accident. The reserved
  871. * backup GDT blocks are stored in their reserved primary GDT block.
  872. */
  873. static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
  874. ext4_group_t group)
  875. {
  876. struct super_block *sb = inode->i_sb;
  877. int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
  878. int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
  879. struct buffer_head **primary;
  880. struct buffer_head *dind;
  881. struct ext4_iloc iloc;
  882. ext4_fsblk_t blk;
  883. __le32 *data, *end;
  884. int gdbackups = 0;
  885. int res, i;
  886. int err;
  887. primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
  888. if (!primary)
  889. return -ENOMEM;
  890. data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
  891. dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
  892. if (IS_ERR(dind)) {
  893. err = PTR_ERR(dind);
  894. dind = NULL;
  895. goto exit_free;
  896. }
  897. blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
  898. data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
  899. EXT4_ADDR_PER_BLOCK(sb));
  900. end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
  901. /* Get each reserved primary GDT block and verify it holds backups */
  902. for (res = 0; res < reserved_gdb; res++, blk++) {
  903. if (le32_to_cpu(*data) != blk) {
  904. ext4_warning(sb, "reserved block %llu"
  905. " not at offset %ld",
  906. blk,
  907. (long)(data - (__le32 *)dind->b_data));
  908. err = -EINVAL;
  909. goto exit_bh;
  910. }
  911. primary[res] = ext4_sb_bread(sb, blk, 0);
  912. if (IS_ERR(primary[res])) {
  913. err = PTR_ERR(primary[res]);
  914. primary[res] = NULL;
  915. goto exit_bh;
  916. }
  917. gdbackups = verify_reserved_gdb(sb, group, primary[res]);
  918. if (gdbackups < 0) {
  919. brelse(primary[res]);
  920. err = gdbackups;
  921. goto exit_bh;
  922. }
  923. if (++data >= end)
  924. data = (__le32 *)dind->b_data;
  925. }
  926. for (i = 0; i < reserved_gdb; i++) {
  927. BUFFER_TRACE(primary[i], "get_write_access");
  928. if ((err = ext4_journal_get_write_access(handle, primary[i])))
  929. goto exit_bh;
  930. }
  931. if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
  932. goto exit_bh;
  933. /*
  934. * Finally we can add each of the reserved backup GDT blocks from
  935. * the new group to its reserved primary GDT block.
  936. */
  937. blk = group * EXT4_BLOCKS_PER_GROUP(sb);
  938. for (i = 0; i < reserved_gdb; i++) {
  939. int err2;
  940. data = (__le32 *)primary[i]->b_data;
  941. /* printk("reserving backup %lu[%u] = %lu\n",
  942. primary[i]->b_blocknr, gdbackups,
  943. blk + primary[i]->b_blocknr); */
  944. data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
  945. err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
  946. if (!err)
  947. err = err2;
  948. }
  949. inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
  950. ext4_mark_iloc_dirty(handle, inode, &iloc);
  951. exit_bh:
  952. while (--res >= 0)
  953. brelse(primary[res]);
  954. brelse(dind);
  955. exit_free:
  956. kfree(primary);
  957. return err;
  958. }
  959. /*
  960. * Update the backup copies of the ext4 metadata. These don't need to be part
  961. * of the main resize transaction, because e2fsck will re-write them if there
  962. * is a problem (basically only OOM will cause a problem). However, we
  963. * _should_ update the backups if possible, in case the primary gets trashed
  964. * for some reason and we need to run e2fsck from a backup superblock. The
  965. * important part is that the new block and inode counts are in the backup
  966. * superblocks, and the location of the new group metadata in the GDT backups.
  967. *
  968. * We do not need to take the s_resize_lock for this, because these
  969. * blocks are not otherwise touched by the filesystem code when it is
  970. * mounted. We don't need to worry about last changing from
  971. * sbi->s_groups_count, because the worst that can happen is that we
  972. * do not copy the full number of backups at this time. The resize
  973. * which changed s_groups_count will backup again.
  974. */
  975. static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
  976. int size, int meta_bg)
  977. {
  978. struct ext4_sb_info *sbi = EXT4_SB(sb);
  979. ext4_group_t last;
  980. const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
  981. unsigned three = 1;
  982. unsigned five = 5;
  983. unsigned seven = 7;
  984. ext4_group_t group = 0;
  985. int rest = sb->s_blocksize - size;
  986. handle_t *handle;
  987. int err = 0, err2;
  988. handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
  989. if (IS_ERR(handle)) {
  990. group = 1;
  991. err = PTR_ERR(handle);
  992. goto exit_err;
  993. }
  994. if (meta_bg == 0) {
  995. group = ext4_list_backups(sb, &three, &five, &seven);
  996. last = sbi->s_groups_count;
  997. } else {
  998. group = ext4_get_group_number(sb, blk_off) + 1;
  999. last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
  1000. }
  1001. while (group < sbi->s_groups_count) {
  1002. struct buffer_head *bh;
  1003. ext4_fsblk_t backup_block;
  1004. /* Out of journal space, and can't get more - abort - so sad */
  1005. if (ext4_handle_valid(handle) &&
  1006. handle->h_buffer_credits == 0 &&
  1007. ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
  1008. (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
  1009. break;
  1010. if (meta_bg == 0)
  1011. backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
  1012. else
  1013. backup_block = (ext4_group_first_block_no(sb, group) +
  1014. ext4_bg_has_super(sb, group));
  1015. bh = sb_getblk(sb, backup_block);
  1016. if (unlikely(!bh)) {
  1017. err = -ENOMEM;
  1018. break;
  1019. }
  1020. ext4_debug("update metadata backup %llu(+%llu)\n",
  1021. backup_block, backup_block -
  1022. ext4_group_first_block_no(sb, group));
  1023. BUFFER_TRACE(bh, "get_write_access");
  1024. if ((err = ext4_journal_get_write_access(handle, bh))) {
  1025. brelse(bh);
  1026. break;
  1027. }
  1028. lock_buffer(bh);
  1029. memcpy(bh->b_data, data, size);
  1030. if (rest)
  1031. memset(bh->b_data + size, 0, rest);
  1032. set_buffer_uptodate(bh);
  1033. unlock_buffer(bh);
  1034. err = ext4_handle_dirty_metadata(handle, NULL, bh);
  1035. if (unlikely(err))
  1036. ext4_std_error(sb, err);
  1037. brelse(bh);
  1038. if (meta_bg == 0)
  1039. group = ext4_list_backups(sb, &three, &five, &seven);
  1040. else if (group == last)
  1041. break;
  1042. else
  1043. group = last;
  1044. }
  1045. if ((err2 = ext4_journal_stop(handle)) && !err)
  1046. err = err2;
  1047. /*
  1048. * Ugh! Need to have e2fsck write the backup copies. It is too
  1049. * late to revert the resize, we shouldn't fail just because of
  1050. * the backup copies (they are only needed in case of corruption).
  1051. *
  1052. * However, if we got here we have a journal problem too, so we
  1053. * can't really start a transaction to mark the superblock.
  1054. * Chicken out and just set the flag on the hope it will be written
  1055. * to disk, and if not - we will simply wait until next fsck.
  1056. */
  1057. exit_err:
  1058. if (err) {
  1059. ext4_warning(sb, "can't update backup for group %u (err %d), "
  1060. "forcing fsck on next reboot", group, err);
  1061. sbi->s_mount_state &= ~EXT4_VALID_FS;
  1062. sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
  1063. mark_buffer_dirty(sbi->s_sbh);
  1064. }
  1065. }
  1066. /*
  1067. * ext4_add_new_descs() adds @count group descriptors of groups
  1068. * starting at @group
  1069. *
  1070. * @handle: journal handle
  1071. * @sb: super block
  1072. * @group: the group no. of the first group desc to be added
  1073. * @resize_inode: the resize inode
  1074. * @count: number of group descriptors to be added
  1075. */
  1076. static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
  1077. ext4_group_t group, struct inode *resize_inode,
  1078. ext4_group_t count)
  1079. {
  1080. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1081. struct ext4_super_block *es = sbi->s_es;
  1082. struct buffer_head *gdb_bh;
  1083. int i, gdb_off, gdb_num, err = 0;
  1084. int meta_bg;
  1085. meta_bg = ext4_has_feature_meta_bg(sb);
  1086. for (i = 0; i < count; i++, group++) {
  1087. int reserved_gdb = ext4_bg_has_super(sb, group) ?
  1088. le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
  1089. gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
  1090. gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
  1091. /*
  1092. * We will only either add reserved group blocks to a backup group
  1093. * or remove reserved blocks for the first group in a new group block.
  1094. * Doing both would mean more complex code, and sane people don't
  1095. * use non-sparse filesystems anymore. This is already checked above.
  1096. */
  1097. if (gdb_off) {
  1098. gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
  1099. gdb_num);
  1100. BUFFER_TRACE(gdb_bh, "get_write_access");
  1101. err = ext4_journal_get_write_access(handle, gdb_bh);
  1102. if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
  1103. err = reserve_backup_gdb(handle, resize_inode, group);
  1104. } else if (meta_bg != 0) {
  1105. err = add_new_gdb_meta_bg(sb, handle, group);
  1106. } else {
  1107. err = add_new_gdb(handle, resize_inode, group);
  1108. }
  1109. if (err)
  1110. break;
  1111. }
  1112. return err;
  1113. }
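/*
 * ext4_get_bitmap() reads the bitmap block at @block and returns an
 * up-to-date buffer head, or NULL if the block cannot be read.
 */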
  1114. static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
  1115. {
  1116. struct buffer_head *bh = sb_getblk(sb, block);
  1117. if (unlikely(!bh))
  1118. return NULL;
  1119. if (!bh_uptodate_or_lock(bh)) {
  1120. if (bh_submit_read(bh) < 0) {
  1121. brelse(bh);
  1122. return NULL;
  1123. }
  1124. }
  1125. return bh;
  1126. }
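/*
 * ext4_set_bitmap_checksums() computes and stores the inode and block bitmap
 * checksums in @gdp for the new group; it is a no-op when metadata_csum is
 * not enabled.
 */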
  1127. static int ext4_set_bitmap_checksums(struct super_block *sb,
  1128. ext4_group_t group,
  1129. struct ext4_group_desc *gdp,
  1130. struct ext4_new_group_data *group_data)
  1131. {
  1132. struct buffer_head *bh;
  1133. if (!ext4_has_metadata_csum(sb))
  1134. return 0;
  1135. bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
  1136. if (!bh)
  1137. return -EIO;
  1138. ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
  1139. EXT4_INODES_PER_GROUP(sb) / 8);
  1140. brelse(bh);
  1141. bh = ext4_get_bitmap(sb, group_data->block_bitmap);
  1142. if (!bh)
  1143. return -EIO;
  1144. ext4_block_bitmap_csum_set(sb, group, gdp, bh);
  1145. brelse(bh);
  1146. return 0;
  1147. }
  1148. /*
  1149. * ext4_setup_new_descs() will set up the group descriptors of a flex bg
  1150. */
  1151. static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
  1152. struct ext4_new_flex_group_data *flex_gd)
  1153. {
  1154. struct ext4_new_group_data *group_data = flex_gd->groups;
  1155. struct ext4_group_desc *gdp;
  1156. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1157. struct buffer_head *gdb_bh;
  1158. ext4_group_t group;
  1159. __u16 *bg_flags = flex_gd->bg_flags;
  1160. int i, gdb_off, gdb_num, err = 0;
  1161. for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
  1162. group = group_data->group;
  1163. gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
  1164. gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
  1165. /*
  1166. * get_write_access() has been called on gdb_bh by ext4_add_new_descs().
  1167. */
  1168. gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
  1169. /* Update group descriptor block for new group */
  1170. gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
  1171. gdb_off * EXT4_DESC_SIZE(sb));
  1172. memset(gdp, 0, EXT4_DESC_SIZE(sb));
  1173. ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
  1174. ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
  1175. err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
  1176. if (err) {
  1177. ext4_std_error(sb, err);
  1178. break;
  1179. }
  1180. ext4_inode_table_set(sb, gdp, group_data->inode_table);
  1181. ext4_free_group_clusters_set(sb, gdp,
  1182. group_data->free_clusters_count);
  1183. ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
  1184. if (ext4_has_group_desc_csum(sb))
  1185. ext4_itable_unused_set(sb, gdp,
  1186. EXT4_INODES_PER_GROUP(sb));
  1187. gdp->bg_flags = cpu_to_le16(*bg_flags);
  1188. ext4_group_desc_csum_set(sb, group, gdp);
  1189. err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
  1190. if (unlikely(err)) {
  1191. ext4_std_error(sb, err);
  1192. break;
  1193. }
  1194. /*
  1195. * We can allocate memory for mb_alloc based on the new group
  1196. * descriptor
  1197. */
  1198. err = ext4_mb_add_groupinfo(sb, group, gdp);
  1199. if (err)
  1200. break;
  1201. }
  1202. return err;
  1203. }
  1204. /*
  1205. * ext4_update_super() updates the super block so that the newly added
  1206. * groups can be seen by the filesystem.
  1207. *
  1208. * @sb: super block
  1209. * @flex_gd: new added groups
  1210. */
  1211. static void ext4_update_super(struct super_block *sb,
  1212. struct ext4_new_flex_group_data *flex_gd)
  1213. {
  1214. ext4_fsblk_t blocks_count = 0;
  1215. ext4_fsblk_t free_blocks = 0;
  1216. ext4_fsblk_t reserved_blocks = 0;
  1217. struct ext4_new_group_data *group_data = flex_gd->groups;
  1218. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1219. struct ext4_super_block *es = sbi->s_es;
  1220. int i;
  1221. BUG_ON(flex_gd->count == 0 || group_data == NULL);
  1222. /*
  1223. * Make the new blocks and inodes valid next. We do this before
  1224. * increasing the group count so that once the group is enabled,
  1225. * all of its blocks and inodes are already valid.
  1226. *
  1227. * We always allocate group-by-group, then block-by-block or
  1228. * inode-by-inode within a group, so enabling these
  1229. * blocks/inodes before the group is live won't actually let us
  1230. * allocate the new space yet.
  1231. */
  1232. for (i = 0; i < flex_gd->count; i++) {
  1233. blocks_count += group_data[i].blocks_count;
  1234. free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
  1235. }
  1236. reserved_blocks = ext4_r_blocks_count(es) * 100;
  1237. reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
  1238. reserved_blocks *= blocks_count;
  1239. do_div(reserved_blocks, 100);
  1240. ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
  1241. ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
  1242. le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
  1243. flex_gd->count);
  1244. le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
  1245. flex_gd->count);
  1246. ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
  1247. /*
  1248. * We need to protect s_groups_count against other CPUs seeing
  1249. * inconsistent state in the superblock.
  1250. *
  1251. * The precise rules we use are:
  1252. *
  1253. * * Writers must perform a smp_wmb() after updating all
  1254. * dependent data and before modifying the groups count
  1255. *
  1256. * * Readers must perform an smp_rmb() after reading the groups
  1257. * count and before reading any dependent data.
  1258. *
  1259. * NB. These rules can be relaxed when checking the group count
  1260. * while freeing data, as we can only allocate from a block
  1261. * group after serialising against the group count, and we can
  1262. * only then free after serialising in turn against that
  1263. * allocation.
  1264. */
  1265. smp_wmb();
  1266. /* Update the global fs size fields */
  1267. sbi->s_groups_count += flex_gd->count;
  1268. sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
  1269. (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
  1270. /* Update the reserved block counts only once the new group is
  1271. * active. */
  1272. ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
  1273. reserved_blocks);
  1274. /* Update the free space counts */
  1275. percpu_counter_add(&sbi->s_freeclusters_counter,
  1276. EXT4_NUM_B2C(sbi, free_blocks));
  1277. percpu_counter_add(&sbi->s_freeinodes_counter,
  1278. EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
  1279. ext4_debug("free blocks count %llu",
  1280. percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
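	/*
	 * Illustrative example (hypothetical numbers): adding 16 new groups
	 * on a filesystem with EXT4_DESC_PER_BLOCK(sb) == 64 and 1024
	 * reserved GDT blocks needs 3 + 1 + DIV_ROUND_UP(16, 64) + 1024 =
	 * 1029 credits.
	 */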
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_super(handle, sb);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;
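	/*
	 * The primary copies have been journaled above; now push the updated
	 * superblock and each group descriptor block we touched out to the
	 * backup copies kept in other block groups.
	 */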
	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb);
		sector_t old_gdb = 0;

		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
				       gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}
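
/*
 * Describe the next batch of new block groups in @flex_gd, starting at the
 * current end of the filesystem and stopping at the end of its flex group
 * (or at the final group of the new size, whichever comes first).
 *
 * Returns 0 when the filesystem has already reached @n_blocks_count (nothing
 * left to add); otherwise fills in @flex_gd and returns 1.
 */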
static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count,
				   unsigned long flexbg_size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
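	/*
	 * flexbg_size is a power of two, so the OR below rounds the first
	 * new group up to the end of its flex group.  For example, with
	 * flexbg_size == 16 and a first new group of 40, last_group becomes
	 * 47; it is then clamped to n_group if the new size ends earlier.
	 */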
	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
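	/*
	 * If the new size does not end exactly on a group boundary, trim
	 * the final group.  For example, if the resize ends 100 clusters
	 * into that group (last == 99), it is sized to 100 clusters' worth
	 * of blocks and its free-cluster count is reduced accordingly.
	 */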
	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
							 last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock.  Prior to that we have
 * not really "added" the group at all.  We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;
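
	/* Hand the single new group to the flex-group add path as a flex
	 * group containing just one group. */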
	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that checking has already
 * been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
			ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_super(handle, sb);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		if (sizeof(sector_t) < 8)
			ext4_warning(sb, "CONFIG_LBDAF not enabled");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}
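	/*
	 * "add" is the number of blocks needed to fill out the partially
	 * used last group, capped at the requested size.  For example, with
	 * 32768 blocks per group and the filesystem currently ending 1000
	 * blocks into its last group, add starts at 31768 and is then
	 * clamped so we never grow past n_blocks_count.
	 */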
	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */

static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto errout;

	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));

	err = ext4_handle_dirty_super(handle, sb);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}
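
	/*
	 * With meta_bg enabled the resize inode is no longer needed: free
	 * its double indirect block (which carried the reserved GDT block
	 * pointers) and truncate the inode down to zero blocks.
	 */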
	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	if (!err)
		err = ret;
	return ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
	int meta_bg;

	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, n_blocks_count - 1);
	if (!bh) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
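		/*
		 * Without meta_bg we can only grow as far as the reserved
		 * GDT blocks allow.  If the requested size needs more
		 * descriptor blocks than that, clamp this pass to the
		 * largest reachable size and remember the real target in
		 * n_blocks_count_retry; once the reserved GDT blocks are
		 * used up we convert to meta_bg and retry with the full
		 * target.
		 */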
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group is big enough to hold all of the
	 * metadata blocks it may need.  (We might not actually need to
	 * store the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups.  Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}