raid56.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2012 Fusion-io All rights reserved.
  4. * Copyright (C) 2012 Intel Corp. All rights reserved.
  5. */
  6. #include <linux/sched.h>
  7. #include <linux/bio.h>
  8. #include <linux/slab.h>
  9. #include <linux/blkdev.h>
  10. #include <linux/raid/pq.h>
  11. #include <linux/hash.h>
  12. #include <linux/list_sort.h>
  13. #include <linux/raid/xor.h>
  14. #include <linux/mm.h>
  15. #include "ctree.h"
  16. #include "disk-io.h"
  17. #include "volumes.h"
  18. #include "raid56.h"
  19. #include "async-thread.h"
  20. /* set when additional merges to this rbio are not allowed */
  21. #define RBIO_RMW_LOCKED_BIT 1
  22. /*
  23. * set when this rbio is sitting in the hash, but it is just a cache
  24. * of past RMW
  25. */
  26. #define RBIO_CACHE_BIT 2
  27. /*
  28. * set when it is safe to trust the stripe_pages for caching
  29. */
  30. #define RBIO_CACHE_READY_BIT 3
  31. #define RBIO_CACHE_SIZE 1024
  32. enum btrfs_rbio_ops {
  33. BTRFS_RBIO_WRITE,
  34. BTRFS_RBIO_READ_REBUILD,
  35. BTRFS_RBIO_PARITY_SCRUB,
  36. BTRFS_RBIO_REBUILD_MISSING,
  37. };
  38. struct btrfs_raid_bio {
  39. struct btrfs_fs_info *fs_info;
  40. struct btrfs_bio *bbio;
  41. /* while we're doing rmw on a stripe
  42. * we put it into a hash table so we can
  43. * lock the stripe and merge more rbios
  44. * into it.
  45. */
  46. struct list_head hash_list;
  47. /*
  48. * LRU list for the stripe cache
  49. */
  50. struct list_head stripe_cache;
  51. /*
  52. * for scheduling work in the helper threads
  53. */
  54. struct btrfs_work work;
  55. /*
  56. * bio list and bio_list_lock are used
  57. * to add more bios into the stripe
  58. * in hopes of avoiding the full rmw
  59. */
  60. struct bio_list bio_list;
  61. spinlock_t bio_list_lock;
  62. /* also protected by the bio_list_lock, the
  63. * plug list is used by the plugging code
  64. * to collect partial bios while plugged. The
  65. * stripe locking code also uses it to hand off
  66. * the stripe lock to the next pending IO
  67. */
  68. struct list_head plug_list;
  69. /*
  70. * flags that tell us if it is safe to
  71. * merge with this bio
  72. */
  73. unsigned long flags;
  74. /* size of each individual stripe on disk */
  75. int stripe_len;
  76. /* number of data stripes (no p/q) */
  77. int nr_data;
  78. int real_stripes;
  79. int stripe_npages;
  80. /*
  81. * set if we're doing a parity rebuild
  82. * for a read from higher up, which is handled
  83. * differently from a parity rebuild as part of
  84. * rmw
  85. */
  86. enum btrfs_rbio_ops operation;
  87. /* first bad stripe */
  88. int faila;
  89. /* second bad stripe (for raid6 use) */
  90. int failb;
  91. int scrubp;
  92. /*
  93. * number of pages needed to represent the full
  94. * stripe
  95. */
  96. int nr_pages;
  97. /*
  98. * size of all the bios in the bio_list. This
  99. * helps us decide if the rbio maps to a full
  100. * stripe or not
  101. */
  102. int bio_list_bytes;
  103. int generic_bio_cnt;
  104. refcount_t refs;
  105. atomic_t stripes_pending;
  106. atomic_t error;
  107. /*
  108. * these are two arrays of pointers. We allocate the
  109. * rbio big enough to hold them both and setup their
  110. * locations when the rbio is allocated
  111. */
  112. /* pointers to pages that we allocated for
  113. * reading/writing stripes directly from the disk (including P/Q)
  114. */
  115. struct page **stripe_pages;
  116. /*
  117. * pointers to the pages in the bio_list. Stored
  118. * here for faster lookup
  119. */
  120. struct page **bio_pages;
  121. /*
  122. * bitmap to record which horizontal stripe has data
  123. */
  124. unsigned long *dbitmap;
  125. /* allocated with real_stripes-many pointers for finish_*() calls */
  126. void **finish_pointers;
  127. /* allocated with stripe_npages-many bits for finish_*() calls */
  128. unsigned long *finish_pbitmap;
  129. };
  130. static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
  131. static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
  132. static void rmw_work(struct btrfs_work *work);
  133. static void read_rebuild_work(struct btrfs_work *work);
  134. static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
  135. static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
  136. static void __free_raid_bio(struct btrfs_raid_bio *rbio);
  137. static void index_rbio_pages(struct btrfs_raid_bio *rbio);
  138. static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
  139. static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
  140. int need_check);
  141. static void scrub_parity_work(struct btrfs_work *work);
  142. static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
  143. {
  144. btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
  145. btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
  146. }
  147. /*
  148. * the stripe hash table is used for locking, and to collect
  149. * bios in hopes of making a full stripe
  150. */
  151. int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
  152. {
  153. struct btrfs_stripe_hash_table *table;
  154. struct btrfs_stripe_hash_table *x;
  155. struct btrfs_stripe_hash *cur;
  156. struct btrfs_stripe_hash *h;
  157. int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
  158. int i;
  159. int table_size;
  160. if (info->stripe_hash_table)
  161. return 0;
  162. /*
  163. * The table is large, starting with order 4 and can go as high as
  164. * order 7 in case lock debugging is turned on.
  165. *
  166. * Try harder to allocate and fallback to vmalloc to lower the chance
  167. * of a failing mount.
  168. */
  169. table_size = sizeof(*table) + sizeof(*h) * num_entries;
  170. table = kvzalloc(table_size, GFP_KERNEL);
  171. if (!table)
  172. return -ENOMEM;
  173. spin_lock_init(&table->cache_lock);
  174. INIT_LIST_HEAD(&table->stripe_cache);
  175. h = table->table;
  176. for (i = 0; i < num_entries; i++) {
  177. cur = h + i;
  178. INIT_LIST_HEAD(&cur->hash_list);
  179. spin_lock_init(&cur->lock);
  180. }
  181. x = cmpxchg(&info->stripe_hash_table, NULL, table);
  182. if (x)
  183. kvfree(x);
  184. return 0;
  185. }
  186. /*
  187. * caching an rbio means to copy anything from the
  188. * bio_pages array into the stripe_pages array. We
  189. * use the page uptodate bit in the stripe cache array
  190. * to indicate if it has valid data
  191. *
  192. * once the caching is done, we set the cache ready
  193. * bit.
  194. */
  195. static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
  196. {
  197. int i;
  198. char *s;
  199. char *d;
  200. int ret;
  201. ret = alloc_rbio_pages(rbio);
  202. if (ret)
  203. return;
  204. for (i = 0; i < rbio->nr_pages; i++) {
  205. if (!rbio->bio_pages[i])
  206. continue;
  207. s = kmap(rbio->bio_pages[i]);
  208. d = kmap(rbio->stripe_pages[i]);
  209. copy_page(d, s);
  210. kunmap(rbio->bio_pages[i]);
  211. kunmap(rbio->stripe_pages[i]);
  212. SetPageUptodate(rbio->stripe_pages[i]);
  213. }
  214. set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
  215. }
  216. /*
  217. * we hash on the first logical address of the stripe
  218. */
  219. static int rbio_bucket(struct btrfs_raid_bio *rbio)
  220. {
  221. u64 num = rbio->bbio->raid_map[0];
  222. /*
  223. * we shift down quite a bit. We're using byte
  224. * addressing, and most of the lower bits are zeros.
  225. * This tends to upset hash_64, and it consistently
  226. * returns just one or two different values.
  227. *
  228. * shifting off the lower bits fixes things.
  229. */
  230. return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
  231. }
  232. /*
  233. * stealing an rbio means taking all the uptodate pages from the stripe
  234. * array in the source rbio and putting them into the destination rbio
  235. */
  236. static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
  237. {
  238. int i;
  239. struct page *s;
  240. struct page *d;
  241. if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
  242. return;
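/*
 * both rbios map the same full stripe, so the page indexes line up;
 * hand each uptodate page over to dest and drop dest's own copy
 */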
  243. for (i = 0; i < dest->nr_pages; i++) {
  244. s = src->stripe_pages[i];
  245. if (!s || !PageUptodate(s)) {
  246. continue;
  247. }
  248. d = dest->stripe_pages[i];
  249. if (d)
  250. __free_page(d);
  251. dest->stripe_pages[i] = s;
  252. src->stripe_pages[i] = NULL;
  253. }
  254. }
  255. /*
  256. * merging means we take the bio_list from the victim and
  257. * splice it into the destination. The victim should
  258. * be discarded afterwards.
  259. *
260. * must be called with dest->bio_list_lock held
  261. */
  262. static void merge_rbio(struct btrfs_raid_bio *dest,
  263. struct btrfs_raid_bio *victim)
  264. {
  265. bio_list_merge(&dest->bio_list, &victim->bio_list);
  266. dest->bio_list_bytes += victim->bio_list_bytes;
  267. dest->generic_bio_cnt += victim->generic_bio_cnt;
  268. bio_list_init(&victim->bio_list);
  269. }
  270. /*
  271. * used to prune items that are in the cache. The caller
  272. * must hold the hash table lock.
  273. */
  274. static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
  275. {
  276. int bucket = rbio_bucket(rbio);
  277. struct btrfs_stripe_hash_table *table;
  278. struct btrfs_stripe_hash *h;
  279. int freeit = 0;
  280. /*
  281. * check the bit again under the hash table lock.
  282. */
  283. if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
  284. return;
  285. table = rbio->fs_info->stripe_hash_table;
  286. h = table->table + bucket;
  287. /* hold the lock for the bucket because we may be
  288. * removing it from the hash table
  289. */
  290. spin_lock(&h->lock);
  291. /*
  292. * hold the lock for the bio list because we need
  293. * to make sure the bio list is empty
  294. */
  295. spin_lock(&rbio->bio_list_lock);
  296. if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
  297. list_del_init(&rbio->stripe_cache);
  298. table->cache_size -= 1;
  299. freeit = 1;
  300. /* if the bio list isn't empty, this rbio is
  301. * still involved in an IO. We take it out
  302. * of the cache list, and drop the ref that
  303. * was held for the list.
  304. *
  305. * If the bio_list was empty, we also remove
  306. * the rbio from the hash_table, and drop
  307. * the corresponding ref
  308. */
  309. if (bio_list_empty(&rbio->bio_list)) {
  310. if (!list_empty(&rbio->hash_list)) {
  311. list_del_init(&rbio->hash_list);
  312. refcount_dec(&rbio->refs);
  313. BUG_ON(!list_empty(&rbio->plug_list));
  314. }
  315. }
  316. }
  317. spin_unlock(&rbio->bio_list_lock);
  318. spin_unlock(&h->lock);
  319. if (freeit)
  320. __free_raid_bio(rbio);
  321. }
  322. /*
  323. * prune a given rbio from the cache
  324. */
  325. static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
  326. {
  327. struct btrfs_stripe_hash_table *table;
  328. unsigned long flags;
  329. if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
  330. return;
  331. table = rbio->fs_info->stripe_hash_table;
  332. spin_lock_irqsave(&table->cache_lock, flags);
  333. __remove_rbio_from_cache(rbio);
  334. spin_unlock_irqrestore(&table->cache_lock, flags);
  335. }
  336. /*
  337. * remove everything in the cache
  338. */
  339. static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
  340. {
  341. struct btrfs_stripe_hash_table *table;
  342. unsigned long flags;
  343. struct btrfs_raid_bio *rbio;
  344. table = info->stripe_hash_table;
  345. spin_lock_irqsave(&table->cache_lock, flags);
  346. while (!list_empty(&table->stripe_cache)) {
  347. rbio = list_entry(table->stripe_cache.next,
  348. struct btrfs_raid_bio,
  349. stripe_cache);
  350. __remove_rbio_from_cache(rbio);
  351. }
  352. spin_unlock_irqrestore(&table->cache_lock, flags);
  353. }
  354. /*
  355. * remove all cached entries and free the hash table
  356. * used by unmount
  357. */
  358. void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
  359. {
  360. if (!info->stripe_hash_table)
  361. return;
  362. btrfs_clear_rbio_cache(info);
  363. kvfree(info->stripe_hash_table);
  364. info->stripe_hash_table = NULL;
  365. }
  366. /*
  367. * insert an rbio into the stripe cache. It
  368. * must have already been prepared by calling
  369. * cache_rbio_pages
  370. *
  371. * If this rbio was already cached, it gets
  372. * moved to the front of the lru.
  373. *
  374. * If the size of the rbio cache is too big, we
  375. * prune an item.
  376. */
  377. static void cache_rbio(struct btrfs_raid_bio *rbio)
  378. {
  379. struct btrfs_stripe_hash_table *table;
  380. unsigned long flags;
  381. if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
  382. return;
  383. table = rbio->fs_info->stripe_hash_table;
  384. spin_lock_irqsave(&table->cache_lock, flags);
  385. spin_lock(&rbio->bio_list_lock);
  386. /* bump our ref if we were not in the list before */
  387. if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
  388. refcount_inc(&rbio->refs);
389. if (!list_empty(&rbio->stripe_cache)) {
  390. list_move(&rbio->stripe_cache, &table->stripe_cache);
  391. } else {
  392. list_add(&rbio->stripe_cache, &table->stripe_cache);
  393. table->cache_size += 1;
  394. }
  395. spin_unlock(&rbio->bio_list_lock);
  396. if (table->cache_size > RBIO_CACHE_SIZE) {
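/*
 * recently used entries are kept at the head of the LRU list, so the
 * tail is the coldest entry; evict it unless it is the rbio we just cached
 */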
  397. struct btrfs_raid_bio *found;
  398. found = list_entry(table->stripe_cache.prev,
  399. struct btrfs_raid_bio,
  400. stripe_cache);
  401. if (found != rbio)
  402. __remove_rbio_from_cache(found);
  403. }
  404. spin_unlock_irqrestore(&table->cache_lock, flags);
  405. }
  406. /*
  407. * helper function to run the xor_blocks api. It is only
  408. * able to do MAX_XOR_BLOCKS at a time, so we need to
  409. * loop through.
  410. */
  411. static void run_xor(void **pages, int src_cnt, ssize_t len)
  412. {
  413. int src_off = 0;
  414. int xor_src_cnt = 0;
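/* the destination buffer sits right after the src_cnt source pointers */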
  415. void *dest = pages[src_cnt];
416. while (src_cnt > 0) {
  417. xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
  418. xor_blocks(xor_src_cnt, len, dest, pages + src_off);
  419. src_cnt -= xor_src_cnt;
  420. src_off += xor_src_cnt;
  421. }
  422. }
  423. /*
  424. * Returns true if the bio list inside this rbio covers an entire stripe (no
  425. * rmw required).
  426. */
  427. static int rbio_is_full(struct btrfs_raid_bio *rbio)
  428. {
  429. unsigned long flags;
  430. unsigned long size = rbio->bio_list_bytes;
  431. int ret = 1;
  432. spin_lock_irqsave(&rbio->bio_list_lock, flags);
  433. if (size != rbio->nr_data * rbio->stripe_len)
  434. ret = 0;
  435. BUG_ON(size > rbio->nr_data * rbio->stripe_len);
  436. spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
  437. return ret;
  438. }
  439. /*
  440. * returns 1 if it is safe to merge two rbios together.
  441. * The merging is safe if the two rbios correspond to
  442. * the same stripe and if they are both going in the same
  443. * direction (read vs write), and if neither one is
  444. * locked for final IO
  445. *
  446. * The caller is responsible for locking such that
  447. * rmw_locked is safe to test
  448. */
  449. static int rbio_can_merge(struct btrfs_raid_bio *last,
  450. struct btrfs_raid_bio *cur)
  451. {
  452. if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
  453. test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
  454. return 0;
  455. /*
  456. * we can't merge with cached rbios, since the
  457. * idea is that when we merge the destination
  458. * rbio is going to run our IO for us. We can
  459. * steal from cached rbios though, other functions
  460. * handle that.
  461. */
  462. if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
  463. test_bit(RBIO_CACHE_BIT, &cur->flags))
  464. return 0;
  465. if (last->bbio->raid_map[0] !=
  466. cur->bbio->raid_map[0])
  467. return 0;
  468. /* we can't merge with different operations */
  469. if (last->operation != cur->operation)
  470. return 0;
  471. /*
472. * A parity scrub reads the full stripe from the drive, then checks
473. * and repairs the parity and writes the new results.
  474. *
  475. * We're not allowed to add any new bios to the
  476. * bio list here, anyone else that wants to
  477. * change this stripe needs to do their own rmw.
  478. */
  479. if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
  480. return 0;
  481. if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
  482. return 0;
  483. if (last->operation == BTRFS_RBIO_READ_REBUILD) {
  484. int fa = last->faila;
  485. int fb = last->failb;
  486. int cur_fa = cur->faila;
  487. int cur_fb = cur->failb;
  488. if (last->faila >= last->failb) {
  489. fa = last->failb;
  490. fb = last->faila;
  491. }
  492. if (cur->faila >= cur->failb) {
  493. cur_fa = cur->failb;
  494. cur_fb = cur->faila;
  495. }
  496. if (fa != cur_fa || fb != cur_fb)
  497. return 0;
  498. }
  499. return 1;
  500. }
  501. static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
  502. int index)
  503. {
  504. return stripe * rbio->stripe_npages + index;
  505. }
  506. /*
  507. * these are just the pages from the rbio array, not from anything
  508. * the FS sent down to us
  509. */
  510. static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
  511. int index)
  512. {
  513. return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
  514. }
  515. /*
  516. * helper to index into the pstripe
  517. */
  518. static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
  519. {
  520. return rbio_stripe_page(rbio, rbio->nr_data, index);
  521. }
  522. /*
  523. * helper to index into the qstripe, returns null
  524. * if there is no qstripe
  525. */
  526. static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
  527. {
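/* a RAID5 rbio has only a P stripe after the data, so there is no Q page */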
  528. if (rbio->nr_data + 1 == rbio->real_stripes)
  529. return NULL;
  530. return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
  531. }
  532. /*
  533. * The first stripe in the table for a logical address
  534. * has the lock. rbios are added in one of three ways:
  535. *
  536. * 1) Nobody has the stripe locked yet. The rbio is given
  537. * the lock and 0 is returned. The caller must start the IO
  538. * themselves.
  539. *
  540. * 2) Someone has the stripe locked, but we're able to merge
  541. * with the lock owner. The rbio is freed and the IO will
  542. * start automatically along with the existing rbio. 1 is returned.
  543. *
  544. * 3) Someone has the stripe locked, but we're not able to merge.
  545. * The rbio is added to the lock owner's plug list, or merged into
  546. * an rbio already on the plug list. When the lock owner unlocks,
  547. * the next rbio on the list is run and the IO is started automatically.
  548. * 1 is returned
  549. *
  550. * If we return 0, the caller still owns the rbio and must continue with
  551. * IO submission. If we return 1, the caller must assume the rbio has
  552. * already been freed.
  553. */
  554. static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
  555. {
  556. int bucket = rbio_bucket(rbio);
  557. struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
  558. struct btrfs_raid_bio *cur;
  559. struct btrfs_raid_bio *pending;
  560. unsigned long flags;
  561. struct btrfs_raid_bio *freeit = NULL;
  562. struct btrfs_raid_bio *cache_drop = NULL;
  563. int ret = 0;
  564. spin_lock_irqsave(&h->lock, flags);
  565. list_for_each_entry(cur, &h->hash_list, hash_list) {
  566. if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
  567. spin_lock(&cur->bio_list_lock);
  568. /* can we steal this cached rbio's pages? */
  569. if (bio_list_empty(&cur->bio_list) &&
  570. list_empty(&cur->plug_list) &&
  571. test_bit(RBIO_CACHE_BIT, &cur->flags) &&
  572. !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
  573. list_del_init(&cur->hash_list);
  574. refcount_dec(&cur->refs);
  575. steal_rbio(cur, rbio);
  576. cache_drop = cur;
  577. spin_unlock(&cur->bio_list_lock);
  578. goto lockit;
  579. }
  580. /* can we merge into the lock owner? */
  581. if (rbio_can_merge(cur, rbio)) {
  582. merge_rbio(cur, rbio);
  583. spin_unlock(&cur->bio_list_lock);
  584. freeit = rbio;
  585. ret = 1;
  586. goto out;
  587. }
  588. /*
  589. * we couldn't merge with the running
  590. * rbio, see if we can merge with the
  591. * pending ones. We don't have to
  592. * check for rmw_locked because there
  593. * is no way they are inside finish_rmw
  594. * right now
  595. */
  596. list_for_each_entry(pending, &cur->plug_list,
  597. plug_list) {
  598. if (rbio_can_merge(pending, rbio)) {
  599. merge_rbio(pending, rbio);
  600. spin_unlock(&cur->bio_list_lock);
  601. freeit = rbio;
  602. ret = 1;
  603. goto out;
  604. }
  605. }
  606. /* no merging, put us on the tail of the plug list,
607. * our rbio will be started when the currently
608. * running rbio unlocks
  609. */
  610. list_add_tail(&rbio->plug_list, &cur->plug_list);
  611. spin_unlock(&cur->bio_list_lock);
  612. ret = 1;
  613. goto out;
  614. }
  615. }
  616. lockit:
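/*
 * nobody else holds this stripe: take a reference and install ourselves
 * in the hash bucket; the caller starts the IO (we return 0)
 */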
  617. refcount_inc(&rbio->refs);
  618. list_add(&rbio->hash_list, &h->hash_list);
  619. out:
  620. spin_unlock_irqrestore(&h->lock, flags);
  621. if (cache_drop)
  622. remove_rbio_from_cache(cache_drop);
  623. if (freeit)
  624. __free_raid_bio(freeit);
  625. return ret;
  626. }
  627. /*
  628. * called as rmw or parity rebuild is completed. If the plug list has more
  629. * rbios waiting for this stripe, the next one on the list will be started
  630. */
  631. static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
  632. {
  633. int bucket;
  634. struct btrfs_stripe_hash *h;
  635. unsigned long flags;
  636. int keep_cache = 0;
  637. bucket = rbio_bucket(rbio);
  638. h = rbio->fs_info->stripe_hash_table->table + bucket;
  639. if (list_empty(&rbio->plug_list))
  640. cache_rbio(rbio);
  641. spin_lock_irqsave(&h->lock, flags);
  642. spin_lock(&rbio->bio_list_lock);
  643. if (!list_empty(&rbio->hash_list)) {
  644. /*
  645. * if we're still cached and there is no other IO
  646. * to perform, just leave this rbio here for others
  647. * to steal from later
  648. */
  649. if (list_empty(&rbio->plug_list) &&
  650. test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
  651. keep_cache = 1;
  652. clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
  653. BUG_ON(!bio_list_empty(&rbio->bio_list));
  654. goto done;
  655. }
  656. list_del_init(&rbio->hash_list);
  657. refcount_dec(&rbio->refs);
  658. /*
  659. * we use the plug list to hold all the rbios
  660. * waiting for the chance to lock this stripe.
  661. * hand the lock over to one of them.
  662. */
  663. if (!list_empty(&rbio->plug_list)) {
  664. struct btrfs_raid_bio *next;
  665. struct list_head *head = rbio->plug_list.next;
  666. next = list_entry(head, struct btrfs_raid_bio,
  667. plug_list);
  668. list_del_init(&rbio->plug_list);
  669. list_add(&next->hash_list, &h->hash_list);
  670. refcount_inc(&next->refs);
  671. spin_unlock(&rbio->bio_list_lock);
  672. spin_unlock_irqrestore(&h->lock, flags);
  673. if (next->operation == BTRFS_RBIO_READ_REBUILD)
  674. start_async_work(next, read_rebuild_work);
  675. else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
  676. steal_rbio(rbio, next);
  677. start_async_work(next, read_rebuild_work);
  678. } else if (next->operation == BTRFS_RBIO_WRITE) {
  679. steal_rbio(rbio, next);
  680. start_async_work(next, rmw_work);
  681. } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
  682. steal_rbio(rbio, next);
  683. start_async_work(next, scrub_parity_work);
  684. }
  685. goto done_nolock;
  686. }
  687. }
  688. done:
  689. spin_unlock(&rbio->bio_list_lock);
  690. spin_unlock_irqrestore(&h->lock, flags);
  691. done_nolock:
  692. if (!keep_cache)
  693. remove_rbio_from_cache(rbio);
  694. }
  695. static void __free_raid_bio(struct btrfs_raid_bio *rbio)
  696. {
  697. int i;
  698. if (!refcount_dec_and_test(&rbio->refs))
  699. return;
  700. WARN_ON(!list_empty(&rbio->stripe_cache));
  701. WARN_ON(!list_empty(&rbio->hash_list));
  702. WARN_ON(!bio_list_empty(&rbio->bio_list));
  703. for (i = 0; i < rbio->nr_pages; i++) {
  704. if (rbio->stripe_pages[i]) {
  705. __free_page(rbio->stripe_pages[i]);
  706. rbio->stripe_pages[i] = NULL;
  707. }
  708. }
  709. btrfs_put_bbio(rbio->bbio);
  710. kfree(rbio);
  711. }
  712. static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
  713. {
  714. struct bio *next;
  715. while (cur) {
  716. next = cur->bi_next;
  717. cur->bi_next = NULL;
  718. cur->bi_status = err;
  719. bio_endio(cur);
  720. cur = next;
  721. }
  722. }
  723. /*
  724. * this frees the rbio and runs through all the bios in the
  725. * bio_list and calls end_io on them
  726. */
  727. static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
  728. {
  729. struct bio *cur = bio_list_get(&rbio->bio_list);
  730. struct bio *extra;
  731. if (rbio->generic_bio_cnt)
  732. btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
  733. /*
  734. * At this moment, rbio->bio_list is empty, however since rbio does not
  735. * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
  736. * hash list, rbio may be merged with others so that rbio->bio_list
  737. * becomes non-empty.
  738. * Once unlock_stripe() is done, rbio->bio_list will not be updated any
  739. * more and we can call bio_endio() on all queued bios.
  740. */
  741. unlock_stripe(rbio);
  742. extra = bio_list_get(&rbio->bio_list);
  743. __free_raid_bio(rbio);
  744. rbio_endio_bio_list(cur, err);
  745. if (extra)
  746. rbio_endio_bio_list(extra, err);
  747. }
  748. /*
  749. * end io function used by finish_rmw. When we finally
  750. * get here, we've written a full stripe
  751. */
  752. static void raid_write_end_io(struct bio *bio)
  753. {
  754. struct btrfs_raid_bio *rbio = bio->bi_private;
  755. blk_status_t err = bio->bi_status;
  756. int max_errors;
  757. if (err)
  758. fail_bio_stripe(rbio, bio);
  759. bio_put(bio);
  760. if (!atomic_dec_and_test(&rbio->stripes_pending))
  761. return;
  762. err = BLK_STS_OK;
763. /* OK, we have written all the stripes we need to. */
  764. max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
  765. 0 : rbio->bbio->max_errors;
  766. if (atomic_read(&rbio->error) > max_errors)
  767. err = BLK_STS_IOERR;
  768. rbio_orig_end_io(rbio, err);
  769. }
  770. /*
  771. * the read/modify/write code wants to use the original bio for
  772. * any pages it included, and then use the rbio for everything
  773. * else. This function decides if a given index (stripe number)
  774. * and page number in that stripe fall inside the original bio
  775. * or the rbio.
  776. *
  777. * if you set bio_list_only, you'll get a NULL back for any ranges
  778. * that are outside the bio_list
  779. *
  780. * This doesn't take any refs on anything, you get a bare page pointer
  781. * and the caller must bump refs as required.
  782. *
  783. * You must call index_rbio_pages once before you can trust
  784. * the answers from this function.
  785. */
  786. static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
  787. int index, int pagenr, int bio_list_only)
  788. {
  789. int chunk_page;
  790. struct page *p = NULL;
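/*
 * the page arrays are laid out stripe by stripe, with
 * stripe_len >> PAGE_SHIFT pages per stripe, so this is the flat
 * index of pagenr within stripe 'index'
 */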
  791. chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
  792. spin_lock_irq(&rbio->bio_list_lock);
  793. p = rbio->bio_pages[chunk_page];
  794. spin_unlock_irq(&rbio->bio_list_lock);
  795. if (p || bio_list_only)
  796. return p;
  797. return rbio->stripe_pages[chunk_page];
  798. }
  799. /*
  800. * number of pages we need for the entire stripe across all the
  801. * drives
  802. */
  803. static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
  804. {
  805. return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
  806. }
  807. /*
808. * allocation and initial setup for the btrfs_raid_bio. Note that
809. * this does not allocate any pages for rbio->stripe_pages.
  810. */
  811. static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
  812. struct btrfs_bio *bbio,
  813. u64 stripe_len)
  814. {
  815. struct btrfs_raid_bio *rbio;
  816. int nr_data = 0;
  817. int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
  818. int num_pages = rbio_nr_pages(stripe_len, real_stripes);
  819. int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
  820. void *p;
  821. rbio = kzalloc(sizeof(*rbio) +
  822. sizeof(*rbio->stripe_pages) * num_pages +
  823. sizeof(*rbio->bio_pages) * num_pages +
  824. sizeof(*rbio->finish_pointers) * real_stripes +
  825. sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
  826. sizeof(*rbio->finish_pbitmap) *
  827. BITS_TO_LONGS(stripe_npages),
  828. GFP_NOFS);
  829. if (!rbio)
  830. return ERR_PTR(-ENOMEM);
  831. bio_list_init(&rbio->bio_list);
  832. INIT_LIST_HEAD(&rbio->plug_list);
  833. spin_lock_init(&rbio->bio_list_lock);
  834. INIT_LIST_HEAD(&rbio->stripe_cache);
  835. INIT_LIST_HEAD(&rbio->hash_list);
  836. rbio->bbio = bbio;
  837. rbio->fs_info = fs_info;
  838. rbio->stripe_len = stripe_len;
  839. rbio->nr_pages = num_pages;
  840. rbio->real_stripes = real_stripes;
  841. rbio->stripe_npages = stripe_npages;
  842. rbio->faila = -1;
  843. rbio->failb = -1;
  844. refcount_set(&rbio->refs, 1);
  845. atomic_set(&rbio->error, 0);
  846. atomic_set(&rbio->stripes_pending, 0);
  847. /*
  848. * the stripe_pages, bio_pages, etc arrays point to the extra
  849. * memory we allocated past the end of the rbio
  850. */
  851. p = rbio + 1;
  852. #define CONSUME_ALLOC(ptr, count) do { \
  853. ptr = p; \
  854. p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
  855. } while (0)
  856. CONSUME_ALLOC(rbio->stripe_pages, num_pages);
  857. CONSUME_ALLOC(rbio->bio_pages, num_pages);
  858. CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
  859. CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
  860. CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
  861. #undef CONSUME_ALLOC
  862. if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
  863. nr_data = real_stripes - 1;
  864. else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
  865. nr_data = real_stripes - 2;
  866. else
  867. BUG();
  868. rbio->nr_data = nr_data;
  869. return rbio;
  870. }
  871. /* allocate pages for all the stripes in the bio, including parity */
  872. static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
  873. {
  874. int i;
  875. struct page *page;
  876. for (i = 0; i < rbio->nr_pages; i++) {
  877. if (rbio->stripe_pages[i])
  878. continue;
  879. page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
  880. if (!page)
  881. return -ENOMEM;
  882. rbio->stripe_pages[i] = page;
  883. }
  884. return 0;
  885. }
  886. /* only allocate pages for p/q stripes */
  887. static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
  888. {
  889. int i;
  890. struct page *page;
  891. i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
  892. for (; i < rbio->nr_pages; i++) {
  893. if (rbio->stripe_pages[i])
  894. continue;
  895. page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
  896. if (!page)
  897. return -ENOMEM;
  898. rbio->stripe_pages[i] = page;
  899. }
  900. return 0;
  901. }
  902. /*
903. * add a single page from a specific stripe into our list of bios for IO.
904. * This will try to merge into existing bios if possible, and returns
  905. * zero if all went well.
  906. */
  907. static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
  908. struct bio_list *bio_list,
  909. struct page *page,
  910. int stripe_nr,
  911. unsigned long page_index,
  912. unsigned long bio_max_len)
  913. {
  914. struct bio *last = bio_list->tail;
  915. u64 last_end = 0;
  916. int ret;
  917. struct bio *bio;
  918. struct btrfs_bio_stripe *stripe;
  919. u64 disk_start;
  920. stripe = &rbio->bbio->stripes[stripe_nr];
  921. disk_start = stripe->physical + (page_index << PAGE_SHIFT);
  922. /* if the device is missing, just fail this stripe */
  923. if (!stripe->dev->bdev)
  924. return fail_rbio_index(rbio, stripe_nr);
  925. /* see if we can add this page onto our existing bio */
  926. if (last) {
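/*
 * compute where the previous bio ends, in bytes, so we can tell
 * whether this page is contiguous with it on the same device
 */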
  927. last_end = (u64)last->bi_iter.bi_sector << 9;
  928. last_end += last->bi_iter.bi_size;
  929. /*
  930. * we can't merge these if they are from different
  931. * devices or if they are not contiguous
  932. */
  933. if (last_end == disk_start && stripe->dev->bdev &&
  934. !last->bi_status &&
  935. last->bi_disk == stripe->dev->bdev->bd_disk &&
  936. last->bi_partno == stripe->dev->bdev->bd_partno) {
  937. ret = bio_add_page(last, page, PAGE_SIZE, 0);
  938. if (ret == PAGE_SIZE)
  939. return 0;
  940. }
  941. }
  942. /* put a new bio on the list */
  943. bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
  944. bio->bi_iter.bi_size = 0;
  945. bio_set_dev(bio, stripe->dev->bdev);
  946. bio->bi_iter.bi_sector = disk_start >> 9;
  947. bio_add_page(bio, page, PAGE_SIZE, 0);
  948. bio_list_add(bio_list, bio);
  949. return 0;
  950. }
  951. /*
  952. * while we're doing the read/modify/write cycle, we could
  953. * have errors in reading pages off the disk. This checks
  954. * for errors and if we're not able to read the page it'll
  955. * trigger parity reconstruction. The rmw will be finished
  956. * after we've reconstructed the failed stripes
  957. */
  958. static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
  959. {
  960. if (rbio->faila >= 0 || rbio->failb >= 0) {
  961. BUG_ON(rbio->faila == rbio->real_stripes - 1);
  962. __raid56_parity_recover(rbio);
  963. } else {
  964. finish_rmw(rbio);
  965. }
  966. }
  967. /*
  968. * helper function to walk our bio list and populate the bio_pages array with
  969. * the result. This seems expensive, but it is faster than constantly
  970. * searching through the bio list as we setup the IO in finish_rmw or stripe
  971. * reconstruction.
  972. *
  973. * This must be called before you trust the answers from page_in_rbio
  974. */
  975. static void index_rbio_pages(struct btrfs_raid_bio *rbio)
  976. {
  977. struct bio *bio;
  978. u64 start;
  979. unsigned long stripe_offset;
  980. unsigned long page_index;
  981. spin_lock_irq(&rbio->bio_list_lock);
  982. bio_list_for_each(bio, &rbio->bio_list) {
  983. struct bio_vec bvec;
  984. struct bvec_iter iter;
  985. int i = 0;
  986. start = (u64)bio->bi_iter.bi_sector << 9;
  987. stripe_offset = start - rbio->bbio->raid_map[0];
  988. page_index = stripe_offset >> PAGE_SHIFT;
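/*
 * a cloned bio may have an advanced bi_iter; restore the iterator
 * saved in btrfs_io_bio so the segment walk below covers the full range
 */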
  989. if (bio_flagged(bio, BIO_CLONED))
  990. bio->bi_iter = btrfs_io_bio(bio)->iter;
  991. bio_for_each_segment(bvec, bio, iter) {
  992. rbio->bio_pages[page_index + i] = bvec.bv_page;
  993. i++;
  994. }
  995. }
  996. spin_unlock_irq(&rbio->bio_list_lock);
  997. }
  998. /*
  999. * this is called from one of two situations. We either
  1000. * have a full stripe from the higher layers, or we've read all
  1001. * the missing bits off disk.
  1002. *
  1003. * This will calculate the parity and then send down any
  1004. * changed blocks.
  1005. */
  1006. static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
  1007. {
  1008. struct btrfs_bio *bbio = rbio->bbio;
  1009. void **pointers = rbio->finish_pointers;
  1010. int nr_data = rbio->nr_data;
  1011. int stripe;
  1012. int pagenr;
  1013. bool has_qstripe;
  1014. struct bio_list bio_list;
  1015. struct bio *bio;
  1016. int ret;
  1017. bio_list_init(&bio_list);
  1018. if (rbio->real_stripes - rbio->nr_data == 1)
  1019. has_qstripe = false;
  1020. else if (rbio->real_stripes - rbio->nr_data == 2)
  1021. has_qstripe = true;
  1022. else
  1023. BUG();
  1024. /* at this point we either have a full stripe,
  1025. * or we've read the full stripe from the drive.
  1026. * recalculate the parity and write the new results.
  1027. *
  1028. * We're not allowed to add any new bios to the
  1029. * bio list here, anyone else that wants to
  1030. * change this stripe needs to do their own rmw.
  1031. */
  1032. spin_lock_irq(&rbio->bio_list_lock);
  1033. set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
  1034. spin_unlock_irq(&rbio->bio_list_lock);
  1035. atomic_set(&rbio->error, 0);
  1036. /*
  1037. * now that we've set rmw_locked, run through the
  1038. * bio list one last time and map the page pointers
  1039. *
  1040. * We don't cache full rbios because we're assuming
  1041. * the higher layers are unlikely to use this area of
  1042. * the disk again soon. If they do use it again,
  1043. * hopefully they will send another full bio.
  1044. */
  1045. index_rbio_pages(rbio);
  1046. if (!rbio_is_full(rbio))
  1047. cache_rbio_pages(rbio);
  1048. else
  1049. clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
  1050. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1051. struct page *p;
  1052. /* first collect one page from each data stripe */
  1053. for (stripe = 0; stripe < nr_data; stripe++) {
  1054. p = page_in_rbio(rbio, stripe, pagenr, 0);
  1055. pointers[stripe] = kmap(p);
  1056. }
  1057. /* then add the parity stripe */
  1058. p = rbio_pstripe_page(rbio, pagenr);
  1059. SetPageUptodate(p);
  1060. pointers[stripe++] = kmap(p);
  1061. if (has_qstripe) {
  1062. /*
  1063. * raid6, add the qstripe and call the
  1064. * library function to fill in our p/q
  1065. */
  1066. p = rbio_qstripe_page(rbio, pagenr);
  1067. SetPageUptodate(p);
  1068. pointers[stripe++] = kmap(p);
  1069. raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
  1070. pointers);
  1071. } else {
  1072. /* raid5 */
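/*
 * seed P with the first data page, then xor the remaining data pages
 * into it (run_xor's destination is pointers[nr_data]), giving
 * P = D0 ^ D1 ^ ... ^ D(nr_data-1)
 */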
  1073. copy_page(pointers[nr_data], pointers[0]);
  1074. run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
  1075. }
  1076. for (stripe = 0; stripe < rbio->real_stripes; stripe++)
  1077. kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
  1078. }
  1079. /*
  1080. * time to start writing. Make bios for everything from the
  1081. * higher layers (the bio_list in our rbio) and our p/q. Ignore
  1082. * everything else.
  1083. */
  1084. for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
  1085. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1086. struct page *page;
  1087. if (stripe < rbio->nr_data) {
  1088. page = page_in_rbio(rbio, stripe, pagenr, 1);
  1089. if (!page)
  1090. continue;
  1091. } else {
  1092. page = rbio_stripe_page(rbio, stripe, pagenr);
  1093. }
  1094. ret = rbio_add_io_page(rbio, &bio_list,
  1095. page, stripe, pagenr, rbio->stripe_len);
  1096. if (ret)
  1097. goto cleanup;
  1098. }
  1099. }
  1100. if (likely(!bbio->num_tgtdevs))
  1101. goto write_data;
  1102. for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
  1103. if (!bbio->tgtdev_map[stripe])
  1104. continue;
  1105. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1106. struct page *page;
  1107. if (stripe < rbio->nr_data) {
  1108. page = page_in_rbio(rbio, stripe, pagenr, 1);
  1109. if (!page)
  1110. continue;
  1111. } else {
  1112. page = rbio_stripe_page(rbio, stripe, pagenr);
  1113. }
  1114. ret = rbio_add_io_page(rbio, &bio_list, page,
  1115. rbio->bbio->tgtdev_map[stripe],
  1116. pagenr, rbio->stripe_len);
  1117. if (ret)
  1118. goto cleanup;
  1119. }
  1120. }
  1121. write_data:
  1122. atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
  1123. BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
  1124. while (1) {
  1125. bio = bio_list_pop(&bio_list);
  1126. if (!bio)
  1127. break;
  1128. bio->bi_private = rbio;
  1129. bio->bi_end_io = raid_write_end_io;
  1130. bio->bi_opf = REQ_OP_WRITE;
  1131. submit_bio(bio);
  1132. }
  1133. return;
  1134. cleanup:
  1135. rbio_orig_end_io(rbio, BLK_STS_IOERR);
  1136. while ((bio = bio_list_pop(&bio_list)))
  1137. bio_put(bio);
  1138. }
  1139. /*
  1140. * helper to find the stripe number for a given bio. Used to figure out which
  1141. * stripe has failed. This expects the bio to correspond to a physical disk,
  1142. * so it looks up based on physical sector numbers.
  1143. */
  1144. static int find_bio_stripe(struct btrfs_raid_bio *rbio,
  1145. struct bio *bio)
  1146. {
  1147. u64 physical = bio->bi_iter.bi_sector;
  1148. u64 stripe_start;
  1149. int i;
  1150. struct btrfs_bio_stripe *stripe;
  1151. physical <<= 9;
  1152. for (i = 0; i < rbio->bbio->num_stripes; i++) {
  1153. stripe = &rbio->bbio->stripes[i];
  1154. stripe_start = stripe->physical;
  1155. if (physical >= stripe_start &&
  1156. physical < stripe_start + rbio->stripe_len &&
  1157. stripe->dev->bdev &&
  1158. bio->bi_disk == stripe->dev->bdev->bd_disk &&
  1159. bio->bi_partno == stripe->dev->bdev->bd_partno) {
  1160. return i;
  1161. }
  1162. }
  1163. return -1;
  1164. }
  1165. /*
  1166. * helper to find the stripe number for a given
  1167. * bio (before mapping). Used to figure out which stripe has
  1168. * failed. This looks up based on logical block numbers.
  1169. */
  1170. static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
  1171. struct bio *bio)
  1172. {
  1173. u64 logical = bio->bi_iter.bi_sector;
  1174. u64 stripe_start;
  1175. int i;
  1176. logical <<= 9;
  1177. for (i = 0; i < rbio->nr_data; i++) {
  1178. stripe_start = rbio->bbio->raid_map[i];
  1179. if (logical >= stripe_start &&
  1180. logical < stripe_start + rbio->stripe_len) {
  1181. return i;
  1182. }
  1183. }
  1184. return -1;
  1185. }
  1186. /*
  1187. * returns -EIO if we had too many failures
  1188. */
  1189. static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
  1190. {
  1191. unsigned long flags;
  1192. int ret = 0;
  1193. spin_lock_irqsave(&rbio->bio_list_lock, flags);
  1194. /* we already know this stripe is bad, move on */
  1195. if (rbio->faila == failed || rbio->failb == failed)
  1196. goto out;
  1197. if (rbio->faila == -1) {
  1198. /* first failure on this rbio */
  1199. rbio->faila = failed;
  1200. atomic_inc(&rbio->error);
  1201. } else if (rbio->failb == -1) {
  1202. /* second failure on this rbio */
  1203. rbio->failb = failed;
  1204. atomic_inc(&rbio->error);
  1205. } else {
  1206. ret = -EIO;
  1207. }
  1208. out:
  1209. spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
  1210. return ret;
  1211. }
  1212. /*
  1213. * helper to fail a stripe based on a physical disk
  1214. * bio.
  1215. */
  1216. static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
  1217. struct bio *bio)
  1218. {
  1219. int failed = find_bio_stripe(rbio, bio);
  1220. if (failed < 0)
  1221. return -EIO;
  1222. return fail_rbio_index(rbio, failed);
  1223. }
  1224. /*
  1225. * this sets each page in the bio uptodate. It should only be used on private
  1226. * rbio pages, nothing that comes in from the higher layers
  1227. */
  1228. static void set_bio_pages_uptodate(struct bio *bio)
  1229. {
  1230. struct bio_vec *bvec;
  1231. int i;
  1232. ASSERT(!bio_flagged(bio, BIO_CLONED));
  1233. bio_for_each_segment_all(bvec, bio, i)
  1234. SetPageUptodate(bvec->bv_page);
  1235. }
  1236. /*
  1237. * end io for the read phase of the rmw cycle. All the bios here are physical
  1238. * stripe bios we've read from the disk so we can recalculate the parity of the
  1239. * stripe.
  1240. *
  1241. * This will usually kick off finish_rmw once all the bios are read in, but it
  1242. * may trigger parity reconstruction if we had any errors along the way
  1243. */
  1244. static void raid_rmw_end_io(struct bio *bio)
  1245. {
  1246. struct btrfs_raid_bio *rbio = bio->bi_private;
  1247. if (bio->bi_status)
  1248. fail_bio_stripe(rbio, bio);
  1249. else
  1250. set_bio_pages_uptodate(bio);
  1251. bio_put(bio);
  1252. if (!atomic_dec_and_test(&rbio->stripes_pending))
  1253. return;
  1254. if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
  1255. goto cleanup;
  1256. /*
  1257. * this will normally call finish_rmw to start our write
  1258. * but if there are any failed stripes we'll reconstruct
  1259. * from parity first
  1260. */
  1261. validate_rbio_for_rmw(rbio);
  1262. return;
  1263. cleanup:
  1264. rbio_orig_end_io(rbio, BLK_STS_IOERR);
  1265. }
  1266. /*
  1267. * the stripe must be locked by the caller. It will
  1268. * unlock after all the writes are done
  1269. */
  1270. static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
  1271. {
  1272. int bios_to_read = 0;
  1273. struct bio_list bio_list;
  1274. int ret;
  1275. int pagenr;
  1276. int stripe;
  1277. struct bio *bio;
  1278. bio_list_init(&bio_list);
  1279. ret = alloc_rbio_pages(rbio);
  1280. if (ret)
  1281. goto cleanup;
  1282. index_rbio_pages(rbio);
  1283. atomic_set(&rbio->error, 0);
  1284. /*
  1285. * build a list of bios to read all the missing parts of this
  1286. * stripe
  1287. */
  1288. for (stripe = 0; stripe < rbio->nr_data; stripe++) {
  1289. for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
  1290. struct page *page;
  1291. /*
  1292. * we want to find all the pages missing from
  1293. * the rbio and read them from the disk. If
  1294. * page_in_rbio finds a page in the bio list
  1295. * we don't need to read it off the stripe.
  1296. */
  1297. page = page_in_rbio(rbio, stripe, pagenr, 1);
  1298. if (page)
  1299. continue;
  1300. page = rbio_stripe_page(rbio, stripe, pagenr);
  1301. /*
  1302. * the bio cache may have handed us an uptodate
  1303. * page. If so, be happy and use it
  1304. */
  1305. if (PageUptodate(page))
  1306. continue;
  1307. ret = rbio_add_io_page(rbio, &bio_list, page,
  1308. stripe, pagenr, rbio->stripe_len);
  1309. if (ret)
  1310. goto cleanup;
  1311. }
  1312. }
  1313. bios_to_read = bio_list_size(&bio_list);
  1314. if (!bios_to_read) {
  1315. /*
  1316. * this can happen if others have merged with
  1317. * us, it means there is nothing left to read.
  1318. * But if there are missing devices it may not be
  1319. * safe to do the full stripe write yet.
  1320. */
  1321. goto finish;
  1322. }
  1323. /*
  1324. * the bbio may be freed once we submit the last bio. Make sure
  1325. * not to touch it after that
  1326. */
  1327. atomic_set(&rbio->stripes_pending, bios_to_read);
  1328. while (1) {
  1329. bio = bio_list_pop(&bio_list);
  1330. if (!bio)
  1331. break;
  1332. bio->bi_private = rbio;
  1333. bio->bi_end_io = raid_rmw_end_io;
  1334. bio->bi_opf = REQ_OP_READ;
  1335. btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
  1336. submit_bio(bio);
  1337. }
  1338. /* the actual write will happen once the reads are done */
  1339. return 0;
  1340. cleanup:
  1341. rbio_orig_end_io(rbio, BLK_STS_IOERR);
  1342. while ((bio = bio_list_pop(&bio_list)))
  1343. bio_put(bio);
  1344. return -EIO;
  1345. finish:
  1346. validate_rbio_for_rmw(rbio);
  1347. return 0;
  1348. }

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}

			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
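
/*
 * Illustration of the plugging strategy above (the numbers are only an
 * example, not fixed by this code): with a 64KiB stripe_len on a 3-device
 * RAID5, a full stripe covers 128KiB of data.  If a writer issues two
 * adjacent 64KiB writes while plugged, both partial rbios land on the plug
 * list; after the sort by bi_sector, rbio_can_merge()/merge_rbio() combine
 * them into one full-stripe rbio, so full_stripe_write() can compute parity
 * once instead of doing two read/modify/write cycles.
 */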

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;

	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
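
/*
 * Write submission summary (derived from the code above): a bio that already
 * covers a full stripe goes straight to full_stripe_write(); a partial write
 * submitted while the task is plugged is parked on the plug list and merged
 * at unplug time in run_plug(); a partial write without a plug falls back to
 * __raid56_parity_write(), which queues rmw_work and hopes that more writes
 * merge into the rbio before the new parity is calculated.
 */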

/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
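/*
 * Background for the recovery cases handled below (standard RAID5/6 math,
 * summarized here for reference):
 *
 *	P = D0 ^ D1 ^ ... ^ D(n-1)			(XOR parity)
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*D(n-1)	(Reed-Solomon syndrome
 *							 over GF(2^8), as used
 *							 by the raid6 library)
 *
 * which gives the cases below:
 *   - one data stripe lost (RAID5 or RAID6): copy P over the missing block
 *     and XOR the surviving data back in (the "pstripe" path);
 *   - one data stripe plus Q lost: same as above, using P only;
 *   - one data stripe plus P lost (RAID6): raid6_datap_recov();
 *   - two data stripes lost (RAID6): raid6_2data_recov();
 *   - P lost on its own in the RAID6 branch: currently returns an error
 *     (see the TODO below) instead of regenerating P;
 *   - P and Q both lost on a read rebuild: there is nothing left to rebuild
 *     the data from, so we return BLK_STS_IOERR.
 */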
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe has failed, do a pstripe
			 * reconstruction from the xors.
			 * If both the q stripe and the P stripe have failed,
			 * we're here due to a crc mismatch and we can't give
			 * them the data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with on-disk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity.  endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
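		/*
		 * Worked example (illustrative only): on a 4-device RAID6
		 * full stripe, real_stripes == 4 with P at index 2 and Q at
		 * index 3.  mirror_num == 3 gives failb = 4 - 2 = 2 (the P
		 * stripe), mirror_num == 4 gives failb = 4 - 3 = 1, i.e. an
		 * extra data stripe is treated as bad; if failb lands at or
		 * below faila, the decrement below shifts it down one more
		 * stripe so a different stripe fails on each retry.
		 */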
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);

	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added to the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace.  That is, those pages only hold metadata or file data
 * with a checksum.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* For now we only support a sectorsize equal to the page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
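
/*
 * Example of the index math above (numbers illustrative only): with 4KiB
 * pages and a full stripe whose data area starts at raid_map[0], a page at
 * logical raid_map[0] + 20KiB gets stripe_offset = 20KiB and index = 5,
 * i.e. the sixth slot of bio_pages[], which models the data area of the
 * full stripe as one linear run of pages.
 */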

/*
 * We only scrub the parity for the horizontal stripes where we have correct
 * data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	unsigned long *pbitmap = rbio->finish_pbitmap;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	bool has_qstripe;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1)
		has_qstripe = false;
	else if (rbio->real_stripes - rbio->nr_data == 2)
		has_qstripe = true;
	else
		BUG();

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (has_qstripe) {
		/* RAID6, allocate and map temp space for the Q stripe */
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
		pointers[rbio->real_stripes - 1] = kmap(q_page);
	}

	atomic_set(&rbio->error, 0);

	/* Map the parity stripe just once */
	pointers[nr_data] = kmap(p_page);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		if (has_qstripe) {
			/* RAID6, call the library function to fill in our P/Q */
			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	kunmap(p_page);
	__free_page(p_page);
	if (q_page) {
		kunmap(q_page);
		__free_page(q_page);
	}

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
				       page, rbio->scrubp, pagenr,
				       rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
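
/*
 * Summary of the parity scrub path ending above (a restatement of the code,
 * not new behavior): for every horizontal stripe marked in dbitmap the
 * expected parity is regenerated from the data pages (XOR for RAID5,
 * raid6_call.gen_syndrome() for RAID6) into scratch pages, compared against
 * the on-disk parity with memcmp(), and only the pages that differ keep
 * their dbitmap bit and get rewritten.  In dev-replace mode the same pages
 * are additionally written to the replacement target recorded in
 * bbio->tgtdev_map[].
 */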

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we can not use the parity stripe that is being
		 * scrubbed to repair data, our repair capability is reduced.
		 * (In the case of RAID5, we can not repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity has failed,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6.  If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise we can not repair the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

/*
 * end io for the read phase of the parity scrub.  All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally go on to finish_parity_scrub to start our
	 * write, but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       stripe, pagenr,
					       rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}
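
/*
 * Typical call sequence for the scrub API above, as used by the scrub code
 * elsewhere in btrfs (a sketch only; parameter values are the caller's
 * business):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, stripe_len,
 *					      scrub_dev, dbitmap, nsectors);
 *	// for each data page whose content has already been verified:
 *	//	raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 *
 * Completion is signalled through the zero-length placeholder bio's end_io
 * handler once the parity has been checked and, where needed, rewritten.
 */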

/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the rebuild-missing rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}
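
/*
 * Typical call sequence for the missing-device rebuild API above, mirroring
 * the scrub case (a sketch only): raid56_alloc_missing_rbio() builds the
 * rbio around a zero-length placeholder bio, raid56_add_scrub_pages()
 * supplies the pages that are already known good, and
 * raid56_submit_missing_rbio() kicks off read_rebuild_work ->
 * __raid56_parity_recover() once the stripe lock is held, which reads
 * whatever else it needs and reconstructs the stripe of the missing device.
 */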