callback_xdr.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * linux/fs/nfs/callback_xdr.c
  4. *
  5. * Copyright (C) 2004 Trond Myklebust
  6. *
  7. * NFSv4 callback encode/decode procedures
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/sunrpc/svc.h>
  11. #include <linux/nfs4.h>
  12. #include <linux/nfs_fs.h>
  13. #include <linux/ratelimit.h>
  14. #include <linux/printk.h>
  15. #include <linux/slab.h>
  16. #include <linux/sunrpc/bc_xprt.h>
  17. #include "nfs4_fs.h"
  18. #include "callback.h"
  19. #include "internal.h"
  20. #include "nfs4session.h"
  21. #include "nfs4trace.h"
  22. #define CB_OP_TAGLEN_MAXSZ (512)
  23. #define CB_OP_HDR_RES_MAXSZ (2 * 4) // opcode, status
  24. #define CB_OP_GETATTR_BITMAP_MAXSZ (4 * 4) // bitmap length, 3 bitmaps
  25. #define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
  26. CB_OP_GETATTR_BITMAP_MAXSZ + \
  27. /* change, size, atime, ctime,
  28. * mtime, deleg_atime, deleg_mtime */\
  29. (2 + 2 + 3 + 3 + 3 + 3 + 3) * 4)
  30. #define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  31. #if defined(CONFIG_NFS_V4_1)
  32. #define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  33. #define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  34. #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
  35. NFS4_MAX_SESSIONID_LEN + \
  36. (1 + 3) * 4) // seqid, 3 slotids
  37. #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  38. #define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  39. #define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  40. #endif /* CONFIG_NFS_V4_1 */
  41. #ifdef CONFIG_NFS_V4_2
  42. #define CB_OP_OFFLOAD_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
  43. #endif /* CONFIG_NFS_V4_2 */
  44. #define NFSDBG_FACILITY NFSDBG_CALLBACK
  45. /* Internal error code */
  46. #define NFS4ERR_RESOURCE_HDR 11050
/*
 * Dispatch-table entry describing one NFSv4 callback operation.
 */
struct callback_op {
	/* Executes the op; args/res point at the decoded argument and
	 * reply structures. */
	__be32 (*process_op)(void *, void *, struct cb_process_state *);
	/* Decodes the on-the-wire arguments from the request stream. */
	__be32 (*decode_args)(struct svc_rqst *, struct xdr_stream *, void *);
	/* Encodes the op's results into the reply stream (may be NULL). */
	__be32 (*encode_res)(struct svc_rqst *, struct xdr_stream *,
			const void *);
	/* Worst-case encoded reply size for this op, in bytes. */
	long res_maxsize;
};

/* Forward declaration; the table (indexed by OP_CB_* number) is
 * defined later in this file. */
static struct callback_op callback_ops[];
  55. static __be32 nfs4_callback_null(struct svc_rqst *rqstp)
  56. {
  57. return htonl(NFS4_OK);
  58. }
  59. /*
  60. * svc_process_common() looks for an XDR encoder to know when
  61. * not to drop a Reply.
  62. */
  63. static bool nfs4_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr)
  64. {
  65. return true;
  66. }
  67. static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len,
  68. const char **str, size_t maxlen)
  69. {
  70. ssize_t err;
  71. err = xdr_stream_decode_opaque_inline(xdr, (void **)str, maxlen);
  72. if (err < 0)
  73. return cpu_to_be32(NFS4ERR_RESOURCE);
  74. *len = err;
  75. return 0;
  76. }
/*
 * Decode an NFSv4 filehandle: a 4-byte length followed by opaque data.
 * Returns NFS4ERR_BADHANDLE when the claimed size exceeds NFS4_FHSIZE,
 * NFS4ERR_RESOURCE when the XDR stream runs short.
 */
static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	fh->size = ntohl(*p);
	if (fh->size > NFS4_FHSIZE)
		return htonl(NFS4ERR_BADHANDLE);
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	memcpy(&fh->data[0], p, fh->size);
	/* Zero the tail so whole-handle compares behave predictably. */
	memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size);
	return 0;
}
/*
 * Decode an attribute bitmap: a word count followed by that many words.
 * Only the first three words are stored; extra words are consumed from
 * the stream but ignored.
 */
static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
{
	__be32 *p;
	unsigned int attrlen;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	attrlen = ntohl(*p);
	/* NOTE(review): attrlen << 2 can wrap for a huge attrlen;
	 * presumably xdr_inline_decode() then fails on the bogus
	 * length — confirm. */
	p = xdr_inline_decode(xdr, attrlen << 2);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	if (likely(attrlen > 0))
		bitmap[0] = ntohl(*p++);
	if (attrlen > 1)
		bitmap[1] = ntohl(*p++);
	if (attrlen > 2)
		bitmap[2] = ntohl(*p);
	return 0;
}
  112. static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  113. {
  114. __be32 *p;
  115. p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  116. if (unlikely(p == NULL))
  117. return htonl(NFS4ERR_RESOURCE);
  118. memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  119. return 0;
  120. }
  121. static __be32 decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  122. {
  123. stateid->type = NFS4_DELEGATION_STATEID_TYPE;
  124. return decode_stateid(xdr, stateid);
  125. }
/*
 * Decode the CB_COMPOUND header: tag, minorversion, callback ident and
 * operation count. Minor versions above NFS4_MAX_MINOR_VERSION are
 * rejected with NFS4ERR_MINOR_VERS_MISMATCH.
 */
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
	__be32 *p;
	__be32 status;

	status = decode_string(xdr, &hdr->taglen, &hdr->tag, CB_OP_TAGLEN_MAXSZ);
	if (unlikely(status != 0))
		return status;
	/* minorversion + cb_ident + nops = three 4-byte words */
	p = xdr_inline_decode(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	hdr->minorversion = ntohl(*p++);
	/* Check for minor version support */
	if (hdr->minorversion <= NFS4_MAX_MINOR_VERSION) {
		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 and v4.2 */
	} else {
		pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
			"illegal minor version %u!\n",
			__func__, hdr->minorversion);
		return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}
	hdr->nops = ntohl(*p);
	return 0;
}
  149. static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
  150. {
  151. __be32 *p;
  152. p = xdr_inline_decode(xdr, 4);
  153. if (unlikely(p == NULL))
  154. return htonl(NFS4ERR_RESOURCE_HDR);
  155. *op = ntohl(*p);
  156. return 0;
  157. }
  158. static __be32 decode_getattr_args(struct svc_rqst *rqstp,
  159. struct xdr_stream *xdr, void *argp)
  160. {
  161. struct cb_getattrargs *args = argp;
  162. __be32 status;
  163. status = decode_fh(xdr, &args->fh);
  164. if (unlikely(status != 0))
  165. return status;
  166. return decode_bitmap(xdr, args->bitmap);
  167. }
/*
 * CB_RECALL arguments: delegation stateid, truncate hint, filehandle.
 */
static __be32 decode_recall_args(struct svc_rqst *rqstp,
		struct xdr_stream *xdr, void *argp)
{
	struct cb_recallargs *args = argp;
	__be32 *p;
	__be32 status;

	status = decode_delegation_stateid(xdr, &args->stateid);
	if (unlikely(status != 0))
		return status;
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	args->truncate = ntohl(*p);
	return decode_fh(xdr, &args->fh);
}
  183. #if defined(CONFIG_NFS_V4_1)
  184. static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  185. {
  186. stateid->type = NFS4_LAYOUT_STATEID_TYPE;
  187. return decode_stateid(xdr, stateid);
  188. }
/*
 * Decode CB_LAYOUTRECALL arguments. The recall targets a single file
 * (RETURN_FILE), a filesystem (RETURN_FSID) or everything (RETURN_ALL);
 * only the FILE case carries a filehandle, byte range and stateid.
 */
static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
				       struct xdr_stream *xdr, void *argp)
{
	struct cb_layoutrecallargs *args = argp;
	__be32 *p;
	__be32 status = 0;
	uint32_t iomode;

	p = xdr_inline_decode(xdr, 4 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);

	args->cbl_layout_type = ntohl(*p++);
	/* Despite the spec's xdr, iomode really belongs in the FILE switch,
	 * as it is unusable and ignored with the other types.
	 */
	iomode = ntohl(*p++);
	args->cbl_layoutchanged = ntohl(*p++);
	args->cbl_recall_type = ntohl(*p++);

	if (args->cbl_recall_type == RETURN_FILE) {
		args->cbl_range.iomode = iomode;
		status = decode_fh(xdr, &args->cbl_fh);
		if (unlikely(status != 0))
			return status;
		/* offset + length */
		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL))
			return htonl(NFS4ERR_BADXDR);
		p = xdr_decode_hyper(p, &args->cbl_range.offset);
		p = xdr_decode_hyper(p, &args->cbl_range.length);
		return decode_layout_stateid(xdr, &args->cbl_stateid);
	} else if (args->cbl_recall_type == RETURN_FSID) {
		p = xdr_inline_decode(xdr, 2 * sizeof(uint64_t));
		if (unlikely(p == NULL))
			return htonl(NFS4ERR_BADXDR);
		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
	} else if (args->cbl_recall_type != RETURN_ALL)
		return htonl(NFS4ERR_BADXDR);
	return 0;
}
/*
 * Decode CB_NOTIFY_DEVICEID arguments: an array of device
 * change/delete notifications. On any error the partially built
 * array is freed and args is left empty (devs == NULL, ndevs == 0).
 */
static
__be32 decode_devicenotify_args(struct svc_rqst *rqstp,
				struct xdr_stream *xdr,
				void *argp)
{
	struct cb_devicenotifyargs *args = argp;
	uint32_t tmp, n, i;
	__be32 *p;
	__be32 status = 0;

	/* Num of device notifications */
	p = xdr_inline_decode(xdr, sizeof(uint32_t));
	if (unlikely(p == NULL)) {
		status = htonl(NFS4ERR_BADXDR);
		goto out;
	}
	n = ntohl(*p++);
	if (n == 0)
		goto out;

	/* kmalloc_array() checks the n * size multiplication for
	 * overflow, so an absurd n just fails with DELAY. */
	args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
	if (!args->devs) {
		status = htonl(NFS4ERR_DELAY);
		goto out;
	}

	/* Decode each dev notification */
	for (i = 0; i < n; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		/* bitmap size + notify type + opaque size + layout type,
		 * then the deviceid itself */
		p = xdr_inline_decode(xdr, (4 * sizeof(uint32_t)) +
				      NFS4_DEVICEID4_SIZE);
		if (unlikely(p == NULL)) {
			status = htonl(NFS4ERR_BADXDR);
			goto err;
		}

		tmp = ntohl(*p++);	/* bitmap size */
		if (tmp != 1) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}
		dev->cbd_notify_type = ntohl(*p++);
		if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE &&
		    dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}

		tmp = ntohl(*p++);	/* opaque size */
		/* CHANGE carries deviceid + layout type + immediate flag;
		 * DELETE carries deviceid + layout type only. */
		if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) &&
		     (tmp != NFS4_DEVICEID4_SIZE + 8)) ||
		    ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) &&
		     (tmp != NFS4_DEVICEID4_SIZE + 4))) {
			status = htonl(NFS4ERR_INVAL);
			goto err;
		}
		dev->cbd_layout_type = ntohl(*p++);
		memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE);
		p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

		/* NOTE(review): the immediate word's presence was validated
		 * above against cbd_notify_type, yet it is read here keyed
		 * on cbd_layout_type — looks like this should test
		 * cbd_notify_type; confirm against RFC 5661
		 * CB_NOTIFY_DEVICEID. */
		if (dev->cbd_layout_type == NOTIFY_DEVICEID4_CHANGE) {
			p = xdr_inline_decode(xdr, sizeof(uint32_t));
			if (unlikely(p == NULL)) {
				status = htonl(NFS4ERR_BADXDR);
				goto err;
			}
			dev->cbd_immediate = ntohl(*p++);
		} else {
			dev->cbd_immediate = 0;
		}

		dprintk("%s: type %d layout 0x%x immediate %d\n",
			__func__, dev->cbd_notify_type, dev->cbd_layout_type,
			dev->cbd_immediate);
	}

	args->ndevs = n;
	dprintk("%s: ndevs %d\n", __func__, args->ndevs);
	return 0;
err:
	kfree(args->devs);
out:
	args->devs = NULL;
	args->ndevs = 0;
	dprintk("%s: status %d ndevs %d\n",
		__func__, ntohl(status), args->ndevs);
	return status;
}
  307. static __be32 decode_sessionid(struct xdr_stream *xdr,
  308. struct nfs4_sessionid *sid)
  309. {
  310. __be32 *p;
  311. p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN);
  312. if (unlikely(p == NULL))
  313. return htonl(NFS4ERR_RESOURCE);
  314. memcpy(sid->data, p, NFS4_MAX_SESSIONID_LEN);
  315. return 0;
  316. }
/*
 * Decode one referring call list: a sessionid plus an array of
 * (sequenceid, slotid) pairs. Allocates rcl_refcalls; freeing on
 * error is the caller's job (see decode_cb_sequence_args()).
 */
static __be32 decode_rc_list(struct xdr_stream *xdr,
			     struct referring_call_list *rc_list)
{
	__be32 *p;
	int i;
	__be32 status;

	status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
	if (status)
		goto out;
	status = htonl(NFS4ERR_RESOURCE);
	p = xdr_inline_decode(xdr, sizeof(uint32_t));
	if (unlikely(p == NULL))
		goto out;
	rc_list->rcl_nrefcalls = ntohl(*p++);
	if (rc_list->rcl_nrefcalls) {
		/* Sanity: the count cannot exceed what the buffer holds. */
		if (unlikely(rc_list->rcl_nrefcalls > xdr->buf->len))
			goto out;
		p = xdr_inline_decode(xdr,
			rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
		if (unlikely(p == NULL))
			goto out;
		rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
						sizeof(*rc_list->rcl_refcalls),
						GFP_KERNEL);
		if (unlikely(rc_list->rcl_refcalls == NULL))
			goto out;
		for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
			rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
			rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
		}
	}
	status = 0;
out:
	return status;
}
/*
 * Decode CB_SEQUENCE arguments: sessionid, sequence/slot ids and the
 * optional referring call lists. On a decode failure all lists built
 * so far (and their refcall arrays) are freed before returning.
 */
static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
				      struct xdr_stream *xdr,
				      void *argp)
{
	struct cb_sequenceargs *args = argp;
	__be32 *p;
	int i;
	__be32 status;

	status = decode_sessionid(xdr, &args->csa_sessionid);
	if (status)
		return status;
	/* sequenceid, slotid, highest slotid, cachethis, nrclists */
	p = xdr_inline_decode(xdr, 5 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	args->csa_addr = svc_addr(rqstp);
	args->csa_sequenceid = ntohl(*p++);
	args->csa_slotid = ntohl(*p++);
	args->csa_highestslotid = ntohl(*p++);
	args->csa_cachethis = ntohl(*p++);
	args->csa_nrclists = ntohl(*p++);
	args->csa_rclists = NULL;
	if (args->csa_nrclists) {
		args->csa_rclists = kmalloc_array(args->csa_nrclists,
						  sizeof(*args->csa_rclists),
						  GFP_KERNEL);
		if (unlikely(args->csa_rclists == NULL))
			return htonl(NFS4ERR_RESOURCE);
		for (i = 0; i < args->csa_nrclists; i++) {
			status = decode_rc_list(xdr, &args->csa_rclists[i]);
			if (status) {
				/* Free only the lists decoded so far. */
				args->csa_nrclists = i;
				goto out_free;
			}
		}
	}
	return 0;
out_free:
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);
	return status;
}
  394. static __be32 decode_recallany_args(struct svc_rqst *rqstp,
  395. struct xdr_stream *xdr,
  396. void *argp)
  397. {
  398. struct cb_recallanyargs *args = argp;
  399. uint32_t bitmap[3];
  400. __be32 *p, status;
  401. p = xdr_inline_decode(xdr, 4);
  402. if (unlikely(p == NULL))
  403. return htonl(NFS4ERR_BADXDR);
  404. args->craa_objs_to_keep = ntohl(*p++);
  405. status = decode_bitmap(xdr, bitmap);
  406. if (unlikely(status))
  407. return status;
  408. args->craa_type_mask = bitmap[0];
  409. return 0;
  410. }
  411. static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
  412. struct xdr_stream *xdr,
  413. void *argp)
  414. {
  415. struct cb_recallslotargs *args = argp;
  416. __be32 *p;
  417. p = xdr_inline_decode(xdr, 4);
  418. if (unlikely(p == NULL))
  419. return htonl(NFS4ERR_BADXDR);
  420. args->crsa_target_highest_slotid = ntohl(*p++);
  421. return 0;
  422. }
/*
 * Decode a lock_owner4: clientid plus a counted opaque owner.
 * A 20-byte owner is parsed as "lock id:" prefix + s_dev + id
 * (presumably this client's own encoding — confirm against the
 * client-side lockowner encoder); any other length is consumed but
 * flagged via cbnl_valid = false.
 */
static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_args *args)
{
	__be32 *p;
	unsigned int len;

	/* clientid (8) + owner length (4) */
	p = xdr_inline_decode(xdr, 12);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);
	p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
	len = be32_to_cpu(*p);
	p = xdr_inline_decode(xdr, len);
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_BADXDR);
	/* Only try to decode if the length is right */
	if (len == 20) {
		p += 2; /* skip "lock id:" */
		args->cbnl_owner.s_dev = be32_to_cpu(*p++);
		xdr_decode_hyper(p, &args->cbnl_owner.id);
		args->cbnl_valid = true;
	} else {
		args->cbnl_owner.s_dev = 0;
		args->cbnl_owner.id = 0;
		args->cbnl_valid = false;
	}
	return 0;
}
  448. static __be32 decode_notify_lock_args(struct svc_rqst *rqstp,
  449. struct xdr_stream *xdr, void *argp)
  450. {
  451. struct cb_notify_lock_args *args = argp;
  452. __be32 status;
  453. status = decode_fh(xdr, &args->cbnl_fh);
  454. if (unlikely(status != 0))
  455. return status;
  456. return decode_lockowner(xdr, args);
  457. }
  458. #endif /* CONFIG_NFS_V4_1 */
  459. #ifdef CONFIG_NFS_V4_2
/*
 * Decode a write_response4 for CB_OFFLOAD: stateid count (always 0,
 * skipped), byte count, stable_how and the write verifier.
 */
static __be32 decode_write_response(struct xdr_stream *xdr,
				    struct cb_offloadargs *args)
{
	__be32 *p;

	/* skip the always zero field */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out;
	p++;

	/* decode count, stable_how, verifier */
	p = xdr_inline_decode(xdr, 8 + 4);
	if (unlikely(!p))
		goto out;
	p = xdr_decode_hyper(p, &args->wr_count);
	args->wr_writeverf.committed = be32_to_cpup(p);
	p = xdr_inline_decode(xdr, NFS4_VERIFIER_SIZE);
	if (likely(p)) {
		memcpy(&args->wr_writeverf.verifier.data[0], p,
			NFS4_VERIFIER_SIZE);
		return 0;
	}
out:
	return htonl(NFS4ERR_RESOURCE);
}
/*
 * Decode CB_OFFLOAD arguments: filehandle, stateid and offload status.
 * A zero status is followed by a full write response; a non-zero one
 * by just the count of bytes copied before the error.
 */
static __be32 decode_offload_args(struct svc_rqst *rqstp,
				  struct xdr_stream *xdr,
				  void *data)
{
	struct cb_offloadargs *args = data;
	__be32 *p;
	__be32 status;

	/* decode fh */
	status = decode_fh(xdr, &args->coa_fh);
	if (unlikely(status != 0))
		return status;

	/* decode stateid */
	status = decode_stateid(xdr, &args->coa_stateid);
	if (unlikely(status != 0))
		return status;

	/* decode status */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out;
	args->error = ntohl(*p++);
	if (!args->error) {
		status = decode_write_response(xdr, args);
		if (unlikely(status != 0))
			return status;
	} else {
		p = xdr_inline_decode(xdr, 8);
		if (unlikely(!p))
			goto out;
		p = xdr_decode_hyper(p, &args->wr_count);
	}
	return 0;
out:
	return htonl(NFS4ERR_RESOURCE);
}
  518. #endif /* CONFIG_NFS_V4_2 */
  519. static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
  520. {
  521. if (unlikely(xdr_stream_encode_opaque(xdr, str, len) < 0))
  522. return cpu_to_be32(NFS4ERR_RESOURCE);
  523. return 0;
  524. }
  525. static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, size_t sz)
  526. {
  527. if (xdr_stream_encode_uint32_array(xdr, bitmap, sz) < 0)
  528. return cpu_to_be32(NFS4ERR_RESOURCE);
  529. return 0;
  530. }
  531. static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change)
  532. {
  533. __be32 *p;
  534. if (!(bitmap[0] & FATTR4_WORD0_CHANGE))
  535. return 0;
  536. p = xdr_reserve_space(xdr, 8);
  537. if (unlikely(!p))
  538. return htonl(NFS4ERR_RESOURCE);
  539. p = xdr_encode_hyper(p, change);
  540. return 0;
  541. }
  542. static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size)
  543. {
  544. __be32 *p;
  545. if (!(bitmap[0] & FATTR4_WORD0_SIZE))
  546. return 0;
  547. p = xdr_reserve_space(xdr, 8);
  548. if (unlikely(!p))
  549. return htonl(NFS4ERR_RESOURCE);
  550. p = xdr_encode_hyper(p, size);
  551. return 0;
  552. }
  553. static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec64 *time)
  554. {
  555. __be32 *p;
  556. p = xdr_reserve_space(xdr, 12);
  557. if (unlikely(!p))
  558. return htonl(NFS4ERR_RESOURCE);
  559. p = xdr_encode_hyper(p, time->tv_sec);
  560. *p = htonl(time->tv_nsec);
  561. return 0;
  562. }
  563. static __be32 encode_attr_atime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
  564. {
  565. if (!(bitmap[1] & FATTR4_WORD1_TIME_ACCESS))
  566. return 0;
  567. return encode_attr_time(xdr,time);
  568. }
  569. static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
  570. {
  571. if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
  572. return 0;
  573. return encode_attr_time(xdr,time);
  574. }
  575. static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec64 *time)
  576. {
  577. if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY))
  578. return 0;
  579. return encode_attr_time(xdr,time);
  580. }
  581. static __be32 encode_attr_delegatime(struct xdr_stream *xdr,
  582. const uint32_t *bitmap,
  583. const struct timespec64 *time)
  584. {
  585. if (!(bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS))
  586. return 0;
  587. return encode_attr_time(xdr,time);
  588. }
  589. static __be32 encode_attr_delegmtime(struct xdr_stream *xdr,
  590. const uint32_t *bitmap,
  591. const struct timespec64 *time)
  592. {
  593. if (!(bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY))
  594. return 0;
  595. return encode_attr_time(xdr,time);
  596. }
/*
 * Start the CB_COMPOUND reply: reserve the status word, echo the tag,
 * and reserve the op-count word. hdr->status and hdr->nops are
 * back-patched by the caller once all ops have been processed.
 */
static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
{
	__be32 status;

	hdr->status = xdr_reserve_space(xdr, 4);
	if (unlikely(hdr->status == NULL))
		return htonl(NFS4ERR_RESOURCE);
	status = encode_string(xdr, hdr->taglen, hdr->tag);
	if (unlikely(status != 0))
		return status;
	hdr->nops = xdr_reserve_space(xdr, 4);
	if (unlikely(hdr->nops == NULL))
		return htonl(NFS4ERR_RESOURCE);
	return 0;
}
  611. static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
  612. {
  613. __be32 *p;
  614. p = xdr_reserve_space(xdr, 8);
  615. if (unlikely(p == NULL))
  616. return htonl(NFS4ERR_RESOURCE_HDR);
  617. *p++ = htonl(op);
  618. *p = res;
  619. return 0;
  620. }
/*
 * Encode CB_GETATTR results: attribute bitmap, then the attribute
 * values preceded by their total byte length. The length word is
 * reserved first (savep) and patched once the values are written.
 */
static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
				 const void *resp)
{
	const struct cb_getattrres *res = resp;
	__be32 *savep = NULL;
	__be32 status = res->status;

	if (unlikely(status != 0))
		goto out;
	status = encode_attr_bitmap(xdr, res->bitmap, ARRAY_SIZE(res->bitmap));
	if (unlikely(status != 0))
		goto out;
	status = cpu_to_be32(NFS4ERR_RESOURCE);
	/* Placeholder for the attribute-values length, patched below. */
	savep = xdr_reserve_space(xdr, sizeof(*savep));
	if (unlikely(!savep))
		goto out;
	status = encode_attr_change(xdr, res->bitmap, res->change_attr);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_size(xdr, res->bitmap, res->size);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_atime(xdr, res->bitmap, &res->atime);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
	if (unlikely(status != 0))
		goto out;
	status = encode_attr_delegatime(xdr, res->bitmap, &res->atime);
	if (unlikely(status != 0))
		goto out;
	/* NOTE(review): unlike the calls above, a delegmtime failure does
	 * not skip the length patch; status still propagates — confirm
	 * this is intentional. */
	status = encode_attr_delegmtime(xdr, res->bitmap, &res->mtime);
	/* Patch in the number of attribute bytes written after savep. */
	*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
out:
	return status;
}
  659. #if defined(CONFIG_NFS_V4_1)
  660. static __be32 encode_sessionid(struct xdr_stream *xdr,
  661. const struct nfs4_sessionid *sid)
  662. {
  663. __be32 *p;
  664. p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
  665. if (unlikely(p == NULL))
  666. return htonl(NFS4ERR_RESOURCE);
  667. memcpy(p, sid, NFS4_MAX_SESSIONID_LEN);
  668. return 0;
  669. }
/*
 * Encode CB_SEQUENCE results: sessionid, then sequenceid and the
 * three slot-id values. Nothing is encoded when csr_status is set.
 */
static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
				     struct xdr_stream *xdr,
				     const void *resp)
{
	const struct cb_sequenceres *res = resp;
	__be32 *p;
	__be32 status = res->csr_status;

	if (unlikely(status != 0))
		return status;
	status = encode_sessionid(xdr, &res->csr_sessionid);
	if (status)
		return status;
	p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		return htonl(NFS4ERR_RESOURCE);
	*p++ = htonl(res->csr_sequenceid);
	*p++ = htonl(res->csr_slotid);
	*p++ = htonl(res->csr_highestslotid);
	*p++ = htonl(res->csr_target_highestslotid);
	return 0;
}
/*
 * Validate an NFSv4.1 callback op number against its position in the
 * compound (@nop, 0-based): CB_SEQUENCE must be first and everything
 * else must not be. On success *op points at the dispatch entry.
 */
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	if (op_nr == OP_CB_SEQUENCE) {
		if (nop != 0)
			return htonl(NFS4ERR_SEQUENCE_POS);
	} else {
		if (nop == 0)
			return htonl(NFS4ERR_OP_NOT_IN_SESSION);
	}
	switch (op_nr) {
	case OP_CB_GETATTR:
	case OP_CB_RECALL:
	case OP_CB_SEQUENCE:
	case OP_CB_RECALL_ANY:
	case OP_CB_RECALL_SLOT:
	case OP_CB_LAYOUTRECALL:
	case OP_CB_NOTIFY_DEVICEID:
	case OP_CB_NOTIFY_LOCK:
		*op = &callback_ops[op_nr];
		break;
	/* Valid op numbers this client does not implement. */
	case OP_CB_NOTIFY:
	case OP_CB_PUSH_DELEG:
	case OP_CB_RECALLABLE_OBJ_AVAIL:
	case OP_CB_WANTS_CANCELLED:
		return htonl(NFS4ERR_NOTSUPP);
	default:
		return htonl(NFS4ERR_OP_ILLEGAL);
	}
	return htonl(NFS_OK);
}
/* Return the backchannel slot to the session's bc slot table. */
static void nfs4_callback_free_slot(struct nfs4_session *session,
				    struct nfs4_slot *slot)
{
	struct nfs4_slot_table *tbl = &session->bc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/*
	 * Let the state manager know callback processing done.
	 * A single slot, so highest used slotid is either 0 or -1
	 */
	nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);
}
  734. static void nfs4_cb_free_slot(struct cb_process_state *cps)
  735. {
  736. if (cps->slot) {
  737. nfs4_callback_free_slot(cps->clp->cl_session, cps->slot);
  738. cps->slot = NULL;
  739. }
  740. }
  741. #else /* CONFIG_NFS_V4_1 */
/* Without CONFIG_NFS_V4_1 there are no minorversion 1 callback ops. */
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}

/* No backchannel slot table to release without NFSv4.1. */
static void nfs4_cb_free_slot(struct cb_process_state *cps)
{
}
  750. #endif /* CONFIG_NFS_V4_1 */
  751. #ifdef CONFIG_NFS_V4_2
  752. static __be32
  753. preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
  754. {
  755. __be32 status = preprocess_nfs41_op(nop, op_nr, op);
  756. if (status != htonl(NFS4ERR_OP_ILLEGAL))
  757. return status;
  758. if (op_nr == OP_CB_OFFLOAD) {
  759. *op = &callback_ops[op_nr];
  760. return htonl(NFS_OK);
  761. } else
  762. return htonl(NFS4ERR_NOTSUPP);
  763. return htonl(NFS4ERR_OP_ILLEGAL);
  764. }
  765. #else /* CONFIG_NFS_V4_2 */
/* Without CONFIG_NFS_V4_2 there are no minorversion 2 callback ops. */
static __be32
preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
{
	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
  771. #endif /* CONFIG_NFS_V4_2 */
  772. static __be32
  773. preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
  774. {
  775. switch (op_nr) {
  776. case OP_CB_GETATTR:
  777. case OP_CB_RECALL:
  778. *op = &callback_ops[op_nr];
  779. break;
  780. default:
  781. return htonl(NFS4ERR_OP_ILLEGAL);
  782. }
  783. return htonl(NFS_OK);
  784. }
/*
 * Decode, dispatch and encode one operation of a callback COMPOUND.
 * @nop is this op's 0-based index within the compound.
 */
static __be32 process_op(int nop, struct svc_rqst *rqstp,
			 struct cb_process_state *cps)
{
	struct xdr_stream *xdr_out = &rqstp->rq_res_stream;
	struct callback_op *op = &callback_ops[0];
	unsigned int op_nr;
	__be32 status;
	long maxlen;
	__be32 res;

	status = decode_op_hdr(&rqstp->rq_arg_stream, &op_nr);
	if (unlikely(status))
		return status;
	/* Each minor version accepts a different set of ops. */
	switch (cps->minorversion) {
	case 0:
		status = preprocess_nfs4_op(op_nr, &op);
		break;
	case 1:
		status = preprocess_nfs41_op(nop, op_nr, &op);
		break;
	case 2:
		status = preprocess_nfs42_op(nop, op_nr, &op);
		break;
	default:
		status = htonl(NFS4ERR_MINOR_VERS_MISMATCH);
	}
	if (status == htonl(NFS4ERR_OP_ILLEGAL))
		op_nr = OP_CB_ILLEGAL;
	if (status)
		goto encode_hdr;
	/* A cached (DRC) error short-circuits real processing. */
	if (cps->drc_status) {
		status = cps->drc_status;
		goto encode_hdr;
	}
	/* NOTE(review): the op is only processed when the remaining reply
	 * space is positive and below PAGE_SIZE — presumably bounding the
	 * per-op reply; confirm the intent of the upper comparison. */
	maxlen = xdr_out->end - xdr_out->p;
	if (maxlen > 0 && maxlen < PAGE_SIZE) {
		status = op->decode_args(rqstp, &rqstp->rq_arg_stream,
					 rqstp->rq_argp);
		if (likely(status == 0))
			status = op->process_op(rqstp->rq_argp, rqstp->rq_resp,
						cps);
	} else
		status = htonl(NFS4ERR_RESOURCE);
encode_hdr:
	res = encode_op_hdr(xdr_out, op_nr, status);
	if (unlikely(res))
		return res;
	if (op->encode_res != NULL && status == 0)
		status = op->encode_res(rqstp, xdr_out, rqstp->rq_resp);
	return status;
}
  835. /*
  836. * Decode, process and encode a COMPOUND
  837. */
/*
 * Service a CB_COMPOUND request: decode the compound header, run each
 * operation through process_op(), then back-patch the overall status
 * and op count into the already-encoded reply header.
 *
 * Returns an RPC-layer accept status (rpc_success, rpc_garbage_args
 * or rpc_system_err); the NFSv4 status travels inside the reply body.
 */
static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
{
	struct cb_compound_hdr_arg hdr_arg = { 0 };
	struct cb_compound_hdr_res hdr_res = { NULL };
	struct cb_process_state cps = {
		.drc_status = 0,
		.clp = NULL,
		.net = SVC_NET(rqstp),
	};
	unsigned int nops = 0;
	__be32 status;

	status = decode_compound_hdr_arg(&rqstp->rq_arg_stream, &hdr_arg);
	/* An undecodable header is reported as garbage args to the RPC layer. */
	if (status == htonl(NFS4ERR_RESOURCE))
		return rpc_garbage_args;

	if (hdr_arg.minorversion == 0) {
		/*
		 * NFSv4.0 identifies the client by the cb_ident carried in
		 * the compound header; look it up and verify the request's
		 * GSS principal before accepting the callback.
		 */
		cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
		if (!cps.clp) {
			trace_nfs_cb_no_clp(rqstp->rq_xid, hdr_arg.cb_ident);
			goto out_invalidcred;
		}
		if (!check_gss_callback_principal(cps.clp, rqstp)) {
			trace_nfs_cb_badprinc(rqstp->rq_xid, hdr_arg.cb_ident);
			nfs_put_client(cps.clp);
			goto out_invalidcred;
		}
		svc_xprt_set_valid(rqstp->rq_xprt);
	}

	cps.minorversion = hdr_arg.minorversion;
	hdr_res.taglen = hdr_arg.taglen;
	hdr_res.tag = hdr_arg.tag;
	/*
	 * Encode the reply header now; it exposes pointers (hdr_res.status,
	 * hdr_res.nops) that are filled in after all ops have run.
	 */
	if (encode_compound_hdr_res(&rqstp->rq_res_stream, &hdr_res) != 0) {
		if (cps.clp)
			nfs_put_client(cps.clp);
		return rpc_system_err;
	}

	/* Process ops in order, stopping at the first failure. */
	while (status == 0 && nops != hdr_arg.nops) {
		status = process_op(nops, rqstp, &cps);
		nops++;
	}

	/* Buffer overflow in decode_ops_hdr or encode_ops_hdr. Return
	 * resource error in cb_compound status without returning op */
	if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
		status = htonl(NFS4ERR_RESOURCE);
		nops--;
	}

	/* Propagate the client's RPC timeout settings to the backchannel. */
	if (svc_is_backchannel(rqstp) && cps.clp) {
		rqstp->bc_to_initval = cps.clp->cl_rpcclient->cl_timeout->to_initval;
		rqstp->bc_to_retries = cps.clp->cl_rpcclient->cl_timeout->to_retries;
	}

	/* Back-patch the final status and op count into the reply header. */
	*hdr_res.status = status;
	*hdr_res.nops = htonl(nops);
	nfs4_cb_free_slot(&cps);
	nfs_put_client(cps.clp);
	return rpc_success;

out_invalidcred:
	pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return rpc_success;
}
  897. static int
  898. nfs_callback_dispatch(struct svc_rqst *rqstp)
  899. {
  900. const struct svc_procedure *procp = rqstp->rq_procinfo;
  901. *rqstp->rq_accept_statp = procp->pc_func(rqstp);
  902. return 1;
  903. }
  904. /*
  905. * Define NFS4 callback COMPOUND ops.
  906. */
/*
 * Dispatch table for CB_COMPOUND operations, indexed by NFSv4 op
 * number.  Entry [0] is the fallback used for illegal/unknown ops
 * (see process_op()); it has no handlers, only a header-sized reply
 * reservation.  res_maxsize is the maximum XDR reply size for the op
 * -- presumably consumed by the per-minorversion preprocessors to
 * check reply-buffer space; confirm against their definitions.
 */
static struct callback_op callback_ops[] = {
	[0] = {
		.res_maxsize = CB_OP_HDR_RES_MAXSZ,
	},
	[OP_CB_GETATTR] = {
		.process_op = nfs4_callback_getattr,
		.decode_args = decode_getattr_args,
		.encode_res = encode_getattr_res,
		.res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
	},
	[OP_CB_RECALL] = {
		.process_op = nfs4_callback_recall,
		.decode_args = decode_recall_args,
		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
	},
#if defined(CONFIG_NFS_V4_1)
	/* Ops below exist only from NFSv4.1 onward. */
	[OP_CB_LAYOUTRECALL] = {
		.process_op = nfs4_callback_layoutrecall,
		.decode_args = decode_layoutrecall_args,
		.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
	},
	[OP_CB_NOTIFY_DEVICEID] = {
		.process_op = nfs4_callback_devicenotify,
		.decode_args = decode_devicenotify_args,
		.res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
	},
	[OP_CB_SEQUENCE] = {
		.process_op = nfs4_callback_sequence,
		.decode_args = decode_cb_sequence_args,
		.encode_res = encode_cb_sequence_res,
		.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
	},
	[OP_CB_RECALL_ANY] = {
		.process_op = nfs4_callback_recallany,
		.decode_args = decode_recallany_args,
		.res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
	},
	[OP_CB_RECALL_SLOT] = {
		.process_op = nfs4_callback_recallslot,
		.decode_args = decode_recallslot_args,
		.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
	},
	[OP_CB_NOTIFY_LOCK] = {
		.process_op = nfs4_callback_notify_lock,
		.decode_args = decode_notify_lock_args,
		.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
	},
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
	/* NFSv4.2-only op. */
	[OP_CB_OFFLOAD] = {
		.process_op = nfs4_callback_offload,
		.decode_args = decode_offload_args,
		.res_maxsize = CB_OP_OFFLOAD_RES_MAXSZ,
	},
#endif /* CONFIG_NFS_V4_2 */
};
  963. /*
  964. * Define NFS4 callback procedures
  965. */
/*
 * Procedure table for the callback service: CB_NULL (ping, void
 * reply) and CB_COMPOUND (the real work, handled by
 * nfs4_callback_compound()).  Shared by the v1 and v4 svc_version
 * structures below.
 */
static const struct svc_procedure nfs4_callback_procedures1[] = {
	[CB_NULL] = {
		.pc_func = nfs4_callback_null,
		.pc_encode = nfs4_encode_void,
		.pc_xdrressize = 1,
		.pc_name = "NULL",
	},
	[CB_COMPOUND] = {
		.pc_func = nfs4_callback_compound,
		.pc_encode = nfs4_encode_void,
		/* arg/res scratch sizes in bytes; XDR reply sizing comes
		 * from pc_xdrressize. */
		.pc_argsize = 256,
		.pc_argzero = 256,
		.pc_ressize = 256,
		.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
		.pc_name = "COMPOUND",
	}
};
/* Per-CPU invocation counters, one slot per procedure above. */
static DEFINE_PER_CPU_ALIGNED(unsigned long,
			      nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)]);

/* Callback program version 1: used by NFSv4.0 servers. */
const struct svc_version nfs4_callback_version1 = {
	.vs_vers = 1,
	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
	.vs_proc = nfs4_callback_procedures1,
	.vs_count = nfs4_callback_count1,
	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
	.vs_dispatch = nfs_callback_dispatch,
	.vs_hidden = true,
	.vs_need_cong_ctrl = true,
};
/* Per-CPU invocation counters for the version-4 instance. */
static DEFINE_PER_CPU_ALIGNED(unsigned long,
			      nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)]);

/*
 * Callback program version 4: identical procedure table to version 1,
 * registered under a different version number.
 */
const struct svc_version nfs4_callback_version4 = {
	.vs_vers = 4,
	.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
	.vs_proc = nfs4_callback_procedures1,
	.vs_count = nfs4_callback_count4,
	.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
	.vs_dispatch = nfs_callback_dispatch,
	.vs_hidden = true,
	.vs_need_cong_ctrl = true,
};