drbd_req.c

/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"

static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_start_io_acct(q, bio_op(req->master_bio),
			      req->i.size >> 9, &device->vdisk->part0);
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_end_io_acct(q, bio_op(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}
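
/* Allocate a drbd_request from the mempool and initialize it from the master
 * bio: clone a private bio for local IO, record the interval (sector, size),
 * and classify the request (RQ_WRITE, RQ_WSAME, RQ_UNMAP) from the bio op. */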
static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_UNMAP : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}
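
/* Final kref release function: runs once the last reference to the request is
 * dropped.  Sanity-checks the request state, unlinks the request from the
 * transfer log and the interval tree, updates the bitmap and the activity log
 * for writes, and returns the request to the mempool. */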
void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
			 s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			 s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, &drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
			 struct bio_and_error *m)
{
	m->bio->bi_status = errno_to_blk_status(m->error);
	bio_endio(m->bio);
	dec_ap_bio(device);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number. If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!put)
		return;

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return;

	drbd_req_complete(req, m);

	/* local completion may still come in later,
	 * we need to keep the req object around. */
	if (req->rq_state & RQ_LOCAL_ABORTED)
		return;

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return;
	}

	kref_put(&req->kref, drbd_req_destroy);
}
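
/* The connection keeps cached pointers into the transfer log (req_next,
 * req_ack_pending, req_not_net_done), so the oldest request in a given state
 * can be looked up without scanning the whole transfer log.  The
 * set_if_null_* helpers establish such a pointer when none is cached yet;
 * the advance_* helpers move it forward once the cached request no longer
 * matches the state in question. */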
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	kref_get(&req->kref);

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			kref_put(&req->kref, drbd_req_destroy);
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			kref_put(&req->kref, drbd_req_destroy);
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	drbd_req_put_completion_ref(req, m, c_put);
	kref_put(&req->kref, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
		  (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
		  (unsigned long long)req->i.sector,
		  req->i.size >> 9,
		  bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 *	enforces that it is all in this one place, where it is easier to audit,
 *	it makes it obvious that whatever "event" "happens" to a request should
 *	happen "atomically" within the req_lock,
 *	and it enforces that we have to think in a very structured manner
 *	about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear. just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait. Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */

			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}
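
/* Decide whether a READ that could be served locally should be sent to the
 * peer instead, according to the configured read-balancing policy: based on
 * backing-device congestion, pending-request counts, a simple striping of the
 * sector address, round-robin, or a static preference. */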
static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING:  /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING:   /* stripe_shift = 20 */
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

/*
 * complete_conflicting_writes - wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about. Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	for (;;) {
		drbd_for_each_overlap(i, &device->write_requests, sector, size) {
			/* Ignore, if already completed to upper layers. */
			if (i->completed)
				continue;
			/* Handle the first found overlap. After the schedule
			 * we have to restart the tree walk. */
			break;
		}
		if (!i)	/* if any */
			break;

		/* Indicate to wake up device->misc_wait on progress. */
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else  /*nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

bool drbd_should_do_remote(union drbd_dev_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
	   states. */
}

static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
	   since we enter state C_AHEAD only if proto >= 96 */
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes. Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}
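
/* Discard and write-zeroes requests are not passed through to the backing
 * device as such; they are translated into blkdev_issue_zeroout() on the
 * backing device, and the private bio is completed right here with the
 * outcome of that call. */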
static void drbd_process_discard_req(struct drbd_request *req)
{
	struct block_device *bdev = req->device->ldev->backing_bdev;

	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
			GFP_NOIO, 0))
		req->private_bio->bi_status = BLK_STS_IOERR;
	bio_endio(req->private_bio);
}
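
/* Submit the private (local) bio to the backing device: pick the matching
 * fault-injection type, re-target the bio at the backing bdev, and either
 * hand it to the block layer, route it through the discard/zeroout helper,
 * or fail it right away if the local disk reference is gone. */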
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	unsigned int type;

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

	bio_set_dev(bio, device->ldev->backing_bdev);

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		if (drbd_insert_fault(device, type))
			bio_io_error(bio);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
			 bio_op(bio) == REQ_OP_DISCARD)
			drbd_process_discard_req(req);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_io_error(bio);
}
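
/* Hand a write over to the per-device submitter thread, which may block on
 * activity log transactions, so that the context that received the bio
 * does not have to. */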
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(bio);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	/* process discards always from our submitter thread */
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
	    bio_op(bio) == REQ_OP_DISCARD)
		goto queue_for_submitter_thread;

	if (rw == WRITE && req->private_bio && req->i.size
	&& !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i))
			goto queue_for_submitter_thread;
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}
	return req;

 queue_for_submitter_thread:
	atomic_inc(&device->ap_actlog_cnt);
	drbd_queue_write(device, req);
	return NULL;
}

/* Require at least one path to current data.
 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
 * We would not allow to read what was written,
 * we would not have bumped the data generation uuids,
 * we would cause data divergence for all the wrong reasons.
 *
 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
 * and queues for retry later.
 */
static bool may_do_writes(struct drbd_device *device)
{
	const union drbd_dev_state s = device->state;

	return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
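
/* Plugging support: while the submitting task has a blk_plug active, remember
 * the most recent request in a drbd_plug_cb.  On unplug, that request is
 * tagged RQ_UNPLUG so the sender follows it with P_UNPLUG_REMOTE, and a
 * generic unplug is queued for the peer as well. */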
struct drbd_plug_cb {
	struct blk_plug_cb cb;
	struct drbd_request *most_recent_req;
	/* do we need more? */
};

static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
	struct drbd_resource *resource = plug->cb.data;
	struct drbd_request *req = plug->most_recent_req;

	kfree(cb);
	if (!req)
		return;

	spin_lock_irq(&resource->req_lock);
	/* In case the sender did not process it yet, raise the flag to
	 * have it followed with P_UNPLUG_REMOTE just after. */
	req->rq_state |= RQ_UNPLUG;
	/* but also queue a generic unplug */
	drbd_queue_unplug(req->device);
	kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);
}

static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
{
	/* A lot of text to say
	 * return (struct drbd_plug_cb*)blk_check_plugged(); */
	struct drbd_plug_cb *plug;
	struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));

	if (cb)
		plug = container_of(cb, struct drbd_plug_cb, cb);
	else
		plug = NULL;
	return plug;
}

static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
{
	struct drbd_request *tmp = plug->most_recent_req;
	/* Will be sent to some peer.
	 * Remember to tag it with UNPLUG_REMOTE on unplug */
	kref_get(&req->kref);
	plug->most_recent_req = req;
	if (tmp)
		kref_put(&tmp->kref, drbd_req_destroy);
}
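
/* Central decision point for a prepared request: under the req_lock, wait for
 * conflicting writes, decide whether the request goes to the local disk, to
 * the peer, or both, queue the network work, and finally (outside the lock)
 * submit the private bio and/or complete the master bio. */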
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_data_dir(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ early, if we cannot serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size != 0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (req->private_bio && !may_do_writes(device)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
			goto nodata;
		}
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we would have done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	if (no_remote == false) {
		struct drbd_plug_cb *plug = drbd_check_plugged(resource);
		if (plug)
			drbd_update_plug(plug, req);
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
				&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		req->pre_submit_jif = jiffies;
		list_add_tail(&req->req_pending_local,
				&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we cannot simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	drbd_req_put_completion_ref(req, &m, 1);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}
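
/* Fast path of the submitter work: writes whose activity log extent is
 * already hot (and the AL is not suspended) take their AL reference via
 * drbd_al_begin_io_fastpath() and are submitted right away; requests
 * that do not need an AL reference are submitted as well.  Only writes
 * to cold extents remain on @incoming for the slow path.
 */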
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct blk_plug plug;
	struct drbd_request *req, *tmp;

	blk_start_plug(&plug);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		&& req->private_bio && req->i.size
		&& !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}
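
/* Try, without blocking, to reserve activity log slots for the requests
 * on @incoming.  Requests whose slot could be reserved move to @pending
 * (to be submitted after the next AL transaction commit); requests whose
 * extent is currently locked out (-EBUSY) move to @later; -ENOBUFS means
 * the transaction is full, and we stop early.
 * Returns true if anything was moved to @pending.
 */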
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}
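
/* After the AL transaction has been committed, mark each pending request
 * as being in the activity log and hand it to drbd_send_and_submit(),
 * under a block layer plug.
 */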
static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct blk_plug plug;
	struct drbd_request *req;

	blk_start_plug(&plug);
	while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
	blk_finish_plug(&plug);
}
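
/* Submitter work function: writes queued on device->submit.writes by the
 * request path are processed here.  A rough outline of one iteration:
 *
 *	splice device->submit.writes -> incoming
 *	submit_fast_path(incoming)		 (hot AL extents)
 *	prepare_al_transaction_nonblock(...)	 (cold extents -> pending/busy)
 *	drbd_al_begin_io_commit()		 (commit the AL transaction)
 *	send_and_submit_pending(pending)
 *
 * sleeping on device->al_wait in between whenever no progress can be made.
 */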
void do_submit(struct work_struct *ws)
{
	struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
	LIST_HEAD(incoming);	/* from drbd_make_request() */
	LIST_HEAD(pending);	/* to be submitted after next AL-transaction commit */
	LIST_HEAD(busy);	/* blocked by resync requests */

	/* grab new incoming requests */
	spin_lock_irq(&device->resource->req_lock);
	list_splice_tail_init(&device->submit.writes, &incoming);
	spin_unlock_irq(&device->resource->req_lock);

	for (;;) {
		DEFINE_WAIT(wait);

		/* move used-to-be-busy back to front of incoming */
		list_splice_init(&busy, &incoming);
		submit_fast_path(device, &incoming);
		if (list_empty(&incoming))
			break;

		for (;;) {
			prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

			list_splice_init(&busy, &incoming);
			prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
			if (!list_empty(&pending))
				break;

			schedule();

			/* If all currently "hot" activity log extents are kept busy by
			 * incoming requests, we still must not totally starve new
			 * requests to "cold" extents.
			 * Something left on &incoming means there had not been
			 * enough update slots available, and the activity log
			 * has been marked as "starving".
			 *
			 * Try again now, without looking for new requests,
			 * effectively blocking all new requests until we made
			 * at least _some_ progress with what we currently have.
			 */
			if (!list_empty(&incoming))
				continue;

			/* Nothing moved to pending, but nothing left
			 * on incoming: all moved to busy!
			 * Grab new and iterate. */
			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &incoming);
			spin_unlock_irq(&device->resource->req_lock);
		}
		finish_wait(&device->al_wait, &wait);

		/* If the transaction was full, before all incoming requests
		 * had been processed, skip ahead to commit, and iterate
		 * without splicing in more incoming requests from upper layers.
		 *
		 * Else, if all incoming have been processed,
		 * they have become either "pending" (to be submitted after
		 * next transaction commit) or "busy" (blocked by resync).
		 *
		 * Maybe more was queued, while we prepared the transaction?
		 * Try to stuff those into this transaction as well.
		 * Be strictly non-blocking here,
		 * we already have something to commit.
		 *
		 * Commit if we don't make any more progress.
		 */
		while (list_empty(&incoming)) {
			LIST_HEAD(more_pending);
			LIST_HEAD(more_incoming);
			bool made_progress;

			/* It is ok to look outside the lock,
			 * it's only an optimization anyway */
			if (list_empty(&device->submit.writes))
				break;

			spin_lock_irq(&device->resource->req_lock);
			list_splice_tail_init(&device->submit.writes, &more_incoming);
			spin_unlock_irq(&device->resource->req_lock);

			if (list_empty(&more_incoming))
				break;

			made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

			list_splice_tail_init(&more_pending, &pending);
			list_splice_tail_init(&more_incoming, &incoming);
			if (!made_progress)
				break;
		}

		drbd_al_begin_io_commit(device);
		send_and_submit_pending(device, &pending);
	}
}
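
/* Entry point from the block layer for bios submitted to a DRBD device:
 * split the bio if necessary, account it via inc_ap_bio(), and feed it
 * into __drbd_make_request() above.  We never hand out a usable polling
 * cookie, hence BLK_QC_T_NONE.
 */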
blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
{
	struct drbd_device *device = (struct drbd_device *) q->queuedata;
	unsigned long start_jif;

	blk_queue_split(q, &bio);

	start_jif = jiffies;

	/*
	 * what we "blindly" assume:
	 */
	D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

	inc_ap_bio(device);
	__drbd_make_request(device, bio, start_jif);
	return BLK_QC_T_NONE;
}
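
/* Helper for request_timer_fn() below: decide whether the oldest request
 * still waiting on the network has exceeded the effective network timeout
 *
 *	ent = timeout * HZ/10 * ko_count	(jiffies)
 *
 * while being careful not to blame the peer right after a reconnect, and
 * not to blame it for a barrier we have not even sent yet.
 */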
static bool net_timeout_reached(struct drbd_request *net_req,
		struct drbd_connection *connection,
		unsigned long now, unsigned long ent,
		unsigned int ko_count, unsigned int timeout)
{
	struct drbd_device *device = net_req->device;

	if (!time_after(now, net_req->pre_send_jif + ent))
		return false;

	if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
		return false;

	if (net_req->rq_state & RQ_NET_PENDING) {
		drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return true;
	}

	/* We received an ACK already (or are using protocol A),
	 * but are waiting for the epoch closing barrier ack.
	 * Check if we sent the barrier already.  We should not blame the peer
	 * for being unresponsive, if we did not even ask it yet. */
	if (net_req->epoch == connection->send.current_epoch_nr) {
		drbd_warn(device,
			"We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
			jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
		return false;
	}

	/* Worst case: we may have been blocked for whatever reason, then
	 * suddenly are able to send a lot of requests (and epoch separating
	 * barriers) in quick succession.
	 * The timestamp of the net_req may be much too old and not correspond
	 * to the sending time of the relevant unack'ed barrier packet, so it
	 * would trigger a spurious timeout.  The latest barrier packet may
	 * have a timestamp too recent to trigger the timeout, so we could
	 * potentially miss a real timeout.  Right now we don't have a place
	 * to conveniently store these timestamps.
	 * But in this particular situation, the application requests are still
	 * completed to upper layers, so DRBD should still "feel" responsive.
	 * No need yet to kill this connection, it may still recover.
	 * If not, eventually we will have queued enough into the network for
	 * us to block.  From that point of view, the timestamp of the last
	 * sent barrier packet is relevant enough.
	 */
	if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
		drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
			connection->send.last_sent_barrier_jif, now,
			jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
		return true;
	}
	return false;
}

/* A request is considered timed out, if
 * - we have some effective timeout from the configuration,
 *   with some state restrictions applied,
 * - the oldest request is waiting for a response from the network
 *   resp. the local disk,
 * - the oldest request is in fact older than the effective timeout,
 * - the connection was established (resp. disk was attached)
 *   for longer than the timeout already.
 * Note that for 32bit jiffies and very stable connections/disks,
 * we may have a wrap around, which is caught by
 *   !time_in_range(now, last_..._jif, last_..._jif + timeout).
 *
 * Side effect: once per 32bit wrap-around interval, which means every
 * ~198 days with 250 HZ, we have a window where the timeout would need
 * to expire twice (worst case) to become effective.  Good enough.
 */
void request_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, request_timer);
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
	struct net_conf *nc;
	unsigned long oldest_submit_jif;
	unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
	unsigned long now;
	unsigned int ko_count = 0, timeout = 0;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
		ko_count = nc->ko_count;
		timeout = nc->timeout;
	}

	if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
		dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
		put_ldev(device);
	}
	rcu_read_unlock();

	ent = timeout * HZ/10 * ko_count;
	et = min_not_zero(dt, ent);

	if (!et)
		return; /* Recurring timer stopped */

	now = jiffies;
	nt = now + et;

	spin_lock_irq(&device->resource->req_lock);
	req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
	req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);

	/* maybe the oldest request waiting for the peer is in fact still
	 * blocking in tcp sendmsg.  That's ok, though, that's handled via the
	 * socket send timeout, requesting a ping, and bumping ko-count in
	 * we_should_drop_the_connection().
	 */

	/* check the oldest request we successfully sent,
	 * but which is still waiting for an ACK. */
	req_peer = connection->req_ack_pending;

	/* if we don't have such a request (e.g. protocol A)
	 * check the oldest request which is still waiting on its epoch
	 * closing barrier ack. */
	if (!req_peer)
		req_peer = connection->req_not_net_done;

	/* evaluate the oldest peer request only in one timer! */
	if (req_peer && req_peer->device != device)
		req_peer = NULL;

	/* do we have something to evaluate? */
	if (req_peer == NULL && req_write == NULL && req_read == NULL)
		goto out;

	oldest_submit_jif =
		(req_write && req_read)
		? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
		  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
		: req_write ? req_write->pre_submit_jif
		: req_read ? req_read->pre_submit_jif : now;

	if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
		_conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);

	if (dt && oldest_submit_jif != now &&
		 time_after(now, oldest_submit_jif + dt) &&
		!time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
		drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
		__drbd_chk_io_error(device, DRBD_FORCE_DETACH);
	}

	/* Reschedule timer for the nearest not already expired timeout.
	 * Fallback to now + min(effective network timeout, disk timeout). */
	ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
		? req_peer->pre_send_jif + ent : now + et;
	dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
		? oldest_submit_jif + dt : now + et;
	nt = time_before(ent, dt) ? ent : dt;
out:
	spin_unlock_irq(&device->resource->req_lock);
	mod_timer(&device->request_timer, nt);
}