// SPDX-License-Identifier: GPL-2.0-or-later
/* vnode and volume validity verification.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include "internal.h"
/*
 * Data validation is managed through a number of mechanisms from the server:
 *
 * (1) On first contact with a server (such as if it has just been rebooted),
 *     the server sends us a CB.InitCallBackState* request.
 *
 * (2) On a RW volume, in response to certain vnode (inode)-accessing RPC
 *     calls, the server maintains a time-limited per-vnode promise that it
 *     will send us a CB.CallBack request if a third party alters the vnodes
 *     accessed.
 *
 *     Note that vnode-level callbacks may also be sent for other reasons,
 *     such as filelock release.
 *
 * (3) On a RO (or Backup) volume, in response to certain vnode-accessing RPC
 *     calls, each server maintains a time-limited per-volume promise that it
 *     will send us a CB.CallBack request if the RO volume is updated to a
 *     snapshot of the RW volume ("vos release").  This is an atomic event
 *     that cuts over all instances of the RO volume across multiple servers
 *     simultaneously.
 *
 *     Note that volume-level callbacks may also be sent for other reasons,
 *     such as the volumeserver taking over control of the volume from the
 *     fileserver.
 *
 *     Note also that each server maintains an independent time limit on an
 *     independent callback.
 *
 * (4) Certain RPC calls include a volume information record "VolSync" in
 *     their reply.  This contains a creation date for the volume that should
 *     remain unchanged for a RW volume (but will be changed if the volume is
 *     restored from backup) or will be bumped to the time of snapshotting
 *     when a RO volume is released.
 *
 * In order to track these events, the following are provided:
 *
 *	->cb_v_break.  A counter of events that might mean that the contents of
 *	a volume have been altered since we last checked a vnode.
 *
 *	->cb_v_check.  A counter of the number of events that we've sent a
 *	query to the server for.  Everything's up to date if this equals
 *	cb_v_break.
 *
 *	->cb_scrub.  A counter of the number of regression events for which we
 *	have to completely wipe the cache.
 *
 *	->cb_ro_snapshot.  A counter of the number of times that we've
 *	recognised that a RO volume has been updated.
 *
 *	->cb_break.  A counter of events that might mean that the contents of a
 *	vnode have been altered.
 *
 *	->cb_expires_at.  The time at which the callback promise expires or
 *	AFS_NO_CB_PROMISE if we have no promise.
 *
 * The way we manage things is:
 *
 * (1) When a volume-level CB.CallBack occurs, we increment ->cb_v_break on
 *     the volume and reset ->cb_expires_at (ie. set AFS_NO_CB_PROMISE) on the
 *     volume and volume's server record.
 *
 * (2) When a CB.InitCallBackState occurs, we treat this as a volume-level
 *     callback break on all the volumes that have been using that volume
 *     (ie. increment ->cb_v_break and reset ->cb_expires_at).
 *
 * (3) When a vnode-level CB.CallBack occurs, we increment ->cb_break on the
 *     vnode and reset its ->cb_expires_at.  If the vnode is mmapped, we also
 *     dispatch a work item to unmap all PTEs to the vnode's pagecache to
 *     force reentry to the filesystem for revalidation.
 *
 * (4) When entering the filesystem, we call afs_validate() to check the
 *     validity of a vnode.  This first checks to see if ->cb_v_check and
 *     ->cb_v_break match, and if they don't, we lock volume->cb_check_lock
 *     exclusively and perform an FS.FetchStatus on the vnode.
 *
 *     After checking the volume, we check the vnode.  If there's a mismatch
 *     between the volume counters and the vnode's mirrors of those counters,
 *     we lock vnode->validate_lock and issue an FS.FetchStatus on the vnode.
 *
 * (5) When the reply from FS.FetchStatus arrives, the VolSync record is
 *     parsed:
 *
 *     (A) If the Creation timestamp has changed on a RW volume or regressed
 *	   on a RO volume, we try to increment ->cb_scrub; if it advances on a
 *	   RO volume, we assume "vos release" happened and try to increment
 *	   ->cb_ro_snapshot.
 *
 *     (B) If the Update timestamp has regressed, we try to increment
 *	   ->cb_scrub.
 *
 *     Note that in both of these cases, we only do the increment if we can
 *     cmpxchg the value of the timestamp from the value we noted before the
 *     op.  This tries to prevent parallel ops from fighting one another.
 *
 *     volume->cb_v_check is then set to ->cb_v_break.
 *
 * (6) The AFSCallBack record included in the FS.FetchStatus reply is also
 *     parsed and used to set the promise in ->cb_expires_at for the vnode,
 *     the volume and the volume's server record.
 *
 * (7) If ->cb_scrub is seen to have advanced, we invalidate the pagecache for
 *     the vnode.
 */
  114. /*
  115. * Check the validity of a vnode/inode and its parent volume.
  116. */
  117. bool afs_check_validity(const struct afs_vnode *vnode)
  118. {
  119. const struct afs_volume *volume = vnode->volume;
  120. time64_t deadline = ktime_get_real_seconds() + 10;
  121. if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
  122. return true;
  123. if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
  124. atomic64_read(&vnode->cb_expires_at) <= deadline ||
  125. volume->cb_expires_at <= deadline ||
  126. vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot) ||
  127. vnode->cb_scrub != atomic_read(&volume->cb_scrub) ||
  128. test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
  129. _debug("inval");
  130. return false;
  131. }
  132. return true;
  133. }
  134. /*
  135. * See if the server we've just talked to is currently excluded.
  136. */
  137. static bool __afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
  138. {
  139. const struct afs_server_entry *se;
  140. const struct afs_server_list *slist;
  141. bool is_excluded = true;
  142. int i;
  143. rcu_read_lock();
  144. slist = rcu_dereference(volume->servers);
  145. for (i = 0; i < slist->nr_servers; i++) {
  146. se = &slist->servers[i];
  147. if (op->server == se->server) {
  148. is_excluded = test_bit(AFS_SE_EXCLUDED, &se->flags);
  149. break;
  150. }
  151. }
  152. rcu_read_unlock();
  153. return is_excluded;
  154. }
  155. /*
  156. * Update the volume's server list when the creation time changes and see if
  157. * the server we've just talked to is currently excluded.
  158. */
  159. static int afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume)
  160. {
  161. int ret;
  162. if (__afs_is_server_excluded(op, volume))
  163. return 1;
  164. set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
  165. ret = afs_check_volume_status(op->volume, op);
  166. if (ret < 0)
  167. return ret;
  168. return __afs_is_server_excluded(op, volume);
  169. }
  170. /*
  171. * Handle a change to the volume creation time in the VolSync record.
  172. */
  173. static int afs_update_volume_creation_time(struct afs_operation *op, struct afs_volume *volume)
  174. {
  175. unsigned int snap;
  176. time64_t cur = volume->creation_time;
  177. time64_t old = op->pre_volsync.creation;
  178. time64_t new = op->volsync.creation;
  179. int ret;
  180. _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
  181. if (cur == TIME64_MIN) {
  182. volume->creation_time = new;
  183. return 0;
  184. }
  185. if (new == cur)
  186. return 0;
  187. /* Try to advance the creation timestamp from what we had before the
  188. * operation to what we got back from the server. This should
  189. * hopefully ensure that in a race between multiple operations only one
  190. * of them will do this.
  191. */
  192. if (cur != old)
  193. return 0;
  194. /* If the creation time changes in an unexpected way, we need to scrub
  195. * our caches. For a RW vol, this will only change if the volume is
  196. * restored from a backup; for a RO/Backup vol, this will advance when
  197. * the volume is updated to a new snapshot (eg. "vos release").
  198. */
  199. if (volume->type == AFSVL_RWVOL)
  200. goto regressed;
  201. if (volume->type == AFSVL_BACKVOL) {
  202. if (new < old)
  203. goto regressed;
  204. goto advance;
  205. }
  206. /* We have an RO volume, we need to query the VL server and look at the
  207. * server flags to see if RW->RO replication is in progress.
  208. */
  209. ret = afs_is_server_excluded(op, volume);
  210. if (ret < 0)
  211. return ret;
  212. if (ret > 0) {
  213. snap = atomic_read(&volume->cb_ro_snapshot);
  214. trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_volume_excluded);
  215. return ret;
  216. }
  217. advance:
  218. snap = atomic_inc_return(&volume->cb_ro_snapshot);
  219. trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_for_vos_release);
  220. volume->creation_time = new;
  221. return 0;
  222. regressed:
  223. atomic_inc(&volume->cb_scrub);
  224. trace_afs_cb_v_break(volume->vid, 0, afs_cb_break_for_creation_regress);
  225. volume->creation_time = new;
  226. return 0;
  227. }
  228. /*
  229. * Handle a change to the volume update time in the VolSync record.
  230. */
  231. static void afs_update_volume_update_time(struct afs_operation *op, struct afs_volume *volume)
  232. {
  233. enum afs_cb_break_reason reason = afs_cb_break_no_break;
  234. time64_t cur = volume->update_time;
  235. time64_t old = op->pre_volsync.update;
  236. time64_t new = op->volsync.update;
  237. _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new);
  238. if (cur == TIME64_MIN) {
  239. volume->update_time = new;
  240. return;
  241. }
  242. if (new == cur)
  243. return;
  244. /* If the volume update time changes in an unexpected way, we need to
  245. * scrub our caches. For a RW vol, this will advance on every
  246. * modification op; for a RO/Backup vol, this will advance when the
  247. * volume is updated to a new snapshot (eg. "vos release").
  248. */
  249. if (new < old)
  250. reason = afs_cb_break_for_update_regress;
  251. /* Try to advance the update timestamp from what we had before the
  252. * operation to what we got back from the server. This should
  253. * hopefully ensure that in a race between multiple operations only one
  254. * of them will do this.
  255. */
  256. if (cur == old) {
  257. if (reason == afs_cb_break_for_update_regress) {
  258. atomic_inc(&volume->cb_scrub);
  259. trace_afs_cb_v_break(volume->vid, 0, reason);
  260. }
  261. volume->update_time = new;
  262. }
  263. }
  264. static int afs_update_volume_times(struct afs_operation *op, struct afs_volume *volume)
  265. {
  266. int ret = 0;
  267. if (likely(op->volsync.creation == volume->creation_time &&
  268. op->volsync.update == volume->update_time))
  269. return 0;
  270. mutex_lock(&volume->volsync_lock);
  271. if (op->volsync.creation != volume->creation_time) {
  272. ret = afs_update_volume_creation_time(op, volume);
  273. if (ret < 0)
  274. goto out;
  275. }
  276. if (op->volsync.update != volume->update_time)
  277. afs_update_volume_update_time(op, volume);
  278. out:
  279. mutex_unlock(&volume->volsync_lock);
  280. return ret;
  281. }
  282. /*
  283. * Update the state of a volume, including recording the expiration time of the
  284. * callback promise. Returns 1 to redo the operation from the start.
  285. */
  286. int afs_update_volume_state(struct afs_operation *op)
  287. {
  288. struct afs_server_list *slist = op->server_list;
  289. struct afs_server_entry *se = &slist->servers[op->server_index];
  290. struct afs_callback *cb = &op->file[0].scb.callback;
  291. struct afs_volume *volume = op->volume;
  292. unsigned int cb_v_break = atomic_read(&volume->cb_v_break);
  293. unsigned int cb_v_check = atomic_read(&volume->cb_v_check);
  294. int ret;
  295. _enter("%llx", op->volume->vid);
  296. if (op->volsync.creation != TIME64_MIN || op->volsync.update != TIME64_MIN) {
  297. ret = afs_update_volume_times(op, volume);
  298. if (ret != 0) {
  299. _leave(" = %d", ret);
  300. return ret;
  301. }
  302. }
  303. if (op->cb_v_break == cb_v_break &&
  304. (op->file[0].scb.have_cb || op->file[1].scb.have_cb)) {
  305. time64_t expires_at = cb->expires_at;
  306. if (!op->file[0].scb.have_cb)
  307. expires_at = op->file[1].scb.callback.expires_at;
  308. se->cb_expires_at = expires_at;
  309. volume->cb_expires_at = expires_at;
  310. }
  311. if (cb_v_check < op->cb_v_break)
  312. atomic_cmpxchg(&volume->cb_v_check, cb_v_check, op->cb_v_break);
  313. return 0;
  314. }
  315. /*
  316. * mark the data attached to an inode as obsolete due to a write on the server
  317. * - might also want to ditch all the outstanding writes and dirty pages
  318. */
  319. static void afs_zap_data(struct afs_vnode *vnode)
  320. {
  321. _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
  322. afs_invalidate_cache(vnode, 0);
  323. /* nuke all the non-dirty pages that aren't locked, mapped or being
  324. * written back in a regular file and completely discard the pages in a
  325. * directory or symlink */
  326. if (S_ISREG(vnode->netfs.inode.i_mode))
  327. filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX);
  328. else
  329. filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX);
  330. }
  331. /*
  332. * validate a vnode/inode
  333. * - there are several things we need to check
  334. * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
  335. * symlink)
  336. * - parent dir metadata changed (security changes)
  337. * - dentry data changed (write, truncate)
  338. * - dentry metadata changed (security changes)
  339. */
  340. int afs_validate(struct afs_vnode *vnode, struct key *key)
  341. {
  342. struct afs_volume *volume = vnode->volume;
  343. unsigned int cb_ro_snapshot, cb_scrub;
  344. time64_t deadline = ktime_get_real_seconds() + 10;
  345. bool zap = false, locked_vol = false;
  346. int ret;
  347. _enter("{v={%llx:%llu} fl=%lx},%x",
  348. vnode->fid.vid, vnode->fid.vnode, vnode->flags,
  349. key_serial(key));
  350. if (afs_check_validity(vnode))
  351. return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
  352. ret = down_write_killable(&vnode->validate_lock);
  353. if (ret < 0)
  354. goto error;
  355. if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
  356. ret = -ESTALE;
  357. goto error_unlock;
  358. }
  359. /* Validate a volume after the v_break has changed or the volume
  360. * callback expired. We only want to do this once per volume per
  361. * v_break change. The actual work will be done when parsing the
  362. * status fetch reply.
  363. */
  364. if (volume->cb_expires_at <= deadline ||
  365. atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) {
  366. ret = mutex_lock_interruptible(&volume->cb_check_lock);
  367. if (ret < 0)
  368. goto error_unlock;
  369. locked_vol = true;
  370. }
  371. cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
  372. cb_scrub = atomic_read(&volume->cb_scrub);
  373. if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
  374. vnode->cb_scrub != cb_scrub)
  375. unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
  376. if (vnode->cb_ro_snapshot != cb_ro_snapshot ||
  377. vnode->cb_scrub != cb_scrub ||
  378. volume->cb_expires_at <= deadline ||
  379. atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
  380. atomic64_read(&vnode->cb_expires_at) <= deadline
  381. ) {
  382. ret = afs_fetch_status(vnode, key, false, NULL);
  383. if (ret < 0) {
  384. if (ret == -ENOENT) {
  385. set_bit(AFS_VNODE_DELETED, &vnode->flags);
  386. ret = -ESTALE;
  387. }
  388. goto error_unlock;
  389. }
  390. _debug("new promise [fl=%lx]", vnode->flags);
  391. }
  392. /* We can drop the volume lock now as. */
  393. if (locked_vol) {
  394. mutex_unlock(&volume->cb_check_lock);
  395. locked_vol = false;
  396. }
  397. cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot);
  398. cb_scrub = atomic_read(&volume->cb_scrub);
  399. _debug("vnode inval %x==%x %x==%x",
  400. vnode->cb_ro_snapshot, cb_ro_snapshot,
  401. vnode->cb_scrub, cb_scrub);
  402. if (vnode->cb_scrub != cb_scrub)
  403. zap = true;
  404. vnode->cb_ro_snapshot = cb_ro_snapshot;
  405. vnode->cb_scrub = cb_scrub;
  406. /* if the vnode's data version number changed then its contents are
  407. * different */
  408. zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
  409. if (zap)
  410. afs_zap_data(vnode);
  411. up_write(&vnode->validate_lock);
  412. _leave(" = 0");
  413. return 0;
  414. error_unlock:
  415. if (locked_vol)
  416. mutex_unlock(&volume->cb_check_lock);
  417. up_write(&vnode->validate_lock);
  418. error:
  419. _leave(" = %d", ret);
  420. return ret;
  421. }