pnfs_nfs.c

/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
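
/* Drop the data server's nfs_client reference taken for this RPC, then
 * hand the rest of the release work to the MDS rpc_release callback.
 */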
void pnfs_generic_rw_release(void *data)
{
        struct nfs_pgio_header *hdr = data;

        nfs_put_client(hdr->ds_clp);
        hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
        struct nfs_writeverf *verf = data->res.verf;

        data->task.tk_status = 0;
        memset(&verf->verifier, 0, sizeof(verf->verifier));
        verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
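
/* Commit completion callback for a data server: defer to the MDS
 * rpc_call_done handler, which may decide to resend the RPC.
 */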
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        /* Note this may cause RPC to be resent */
        wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
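
/* Release a data server commit: run the completion ops, then drop the
 * layout segment and nfs_client references before freeing the commit data.
 */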
void pnfs_generic_commit_release(void *calldata)
{
        struct nfs_commit_data *data = calldata;

        data->completion_ops->completion(data);
        pnfs_put_lseg(data->lseg);
        nfs_put_client(data->ds_clp);
        nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding nfsi->commit_mutex
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
                                  struct nfs_commit_info *cinfo)
{
        struct pnfs_layout_segment *freeme = NULL;

        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
        cinfo->ds->nwritten--;
        if (list_is_singular(&req->wb_list)) {
                struct pnfs_commit_bucket *bucket;

                bucket = list_first_entry(&req->wb_list,
                                          struct pnfs_commit_bucket,
                                          written);
                freeme = bucket->wlseg;
                bucket->wlseg = NULL;
        }
out:
        nfs_request_remove_commit_list(req, cinfo);
        pnfs_put_lseg(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
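
/* Move up to @max requests from one bucket's written list onto its
 * committing list, taking a commit-side lseg reference if the bucket
 * does not already hold one.
 */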
static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
                                 struct nfs_commit_info *cinfo,
                                 int max)
{
        struct list_head *src = &bucket->written;
        struct list_head *dst = &bucket->committing;
        int ret;

        lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
        ret = nfs_scan_commit_list(src, dst, cinfo, max);
        if (ret) {
                cinfo->ds->nwritten -= ret;
                cinfo->ds->ncommitting += ret;
                if (bucket->clseg == NULL)
                        bucket->clseg = pnfs_get_lseg(bucket->wlseg);
                if (list_empty(src)) {
                        pnfs_put_lseg(bucket->wlseg);
                        bucket->wlseg = NULL;
                }
        }
        return ret;
}

/* Move reqs from written to committing lists, returning the number of
 * requests moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
                                   int max)
{
        int i, rv = 0, cnt;

        lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
        for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
                cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
                                                       cinfo, max);
                max -= cnt;
                rv += cnt;
        }
        return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

/* Pull everything off the buckets' written lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
                                      struct nfs_commit_info *cinfo)
{
        struct pnfs_commit_bucket *b;
        struct pnfs_layout_segment *freeme;
        int nwritten;
        int i;

        lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
restart:
        for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
                nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
                if (!nwritten)
                        continue;
                cinfo->ds->nwritten -= nwritten;
                if (list_empty(&b->written)) {
                        freeme = b->wlseg;
                        b->wlseg = NULL;
                        pnfs_put_lseg(freeme);
                        goto restart;
                }
        }
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
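
/* Requeue the requests sitting on the committing lists of buckets >= @idx
 * so that they will be retried, dropping each bucket's commit lseg
 * reference in the process.
 */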
static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
        struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
        struct pnfs_commit_bucket *bucket;
        struct pnfs_layout_segment *freeme;
        struct list_head *pos;
        LIST_HEAD(pages);
        int i;

        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        for (i = idx; i < fl_cinfo->nbuckets; i++) {
                bucket = &fl_cinfo->buckets[i];
                if (list_empty(&bucket->committing))
                        continue;
                freeme = bucket->clseg;
                bucket->clseg = NULL;
                list_for_each(pos, &bucket->committing)
                        cinfo->ds->ncommitting--;
                list_splice_init(&bucket->committing, &pages);
                mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
                nfs_retry_commit(&pages, freeme, cinfo, i);
                pnfs_put_lseg(freeme);
                mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        }
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
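
/* Allocate one nfs_commit_data per non-empty bucket and queue it on @list.
 * If an allocation fails, the requests on the remaining buckets are handed
 * back via pnfs_generic_retry_commit().
 */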
static unsigned int
pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
                              struct list_head *list)
{
        struct pnfs_ds_commit_info *fl_cinfo;
        struct pnfs_commit_bucket *bucket;
        struct nfs_commit_data *data;
        int i;
        unsigned int nreq = 0;

        fl_cinfo = cinfo->ds;
        bucket = fl_cinfo->buckets;
        for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
                if (list_empty(&bucket->committing))
                        continue;
                data = nfs_commitdata_alloc(false);
                if (!data)
                        break;
                data->ds_commit_index = i;
                list_add(&data->pages, list);
                nreq++;
        }

        /* Clean up on error */
        pnfs_generic_retry_commit(cinfo, i);
        return nreq;
}
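
/* Detach the committing requests and the commit lseg from the bucket
 * selected by data->ds_commit_index, under the commit mutex.
 */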
static inline
void pnfs_fetch_commit_bucket_list(struct list_head *pages,
                                   struct nfs_commit_data *data,
                                   struct nfs_commit_info *cinfo)
{
        struct pnfs_commit_bucket *bucket;
        struct list_head *pos;

        bucket = &cinfo->ds->buckets[data->ds_commit_index];
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        list_for_each(pos, &bucket->committing)
                cinfo->ds->ncommitting--;
        list_splice_init(&bucket->committing, pages);
        data->lseg = bucket->clseg;
        bucket->clseg = NULL;
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

/* Helper function for pnfs_generic_commit_pagelist to catch an empty
 * page list. This can happen when two commits race.
 *
 * This must be called instead of nfs_init_commit - call one or the other, but
 * not both!
 */
static bool
pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
                                          struct nfs_commit_data *data,
                                          struct nfs_commit_info *cinfo)
{
        if (list_empty(pages)) {
                if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
                        wake_up_var(&cinfo->mds->rpcs_out);
                /* don't call nfs_commitdata_release - it tries to put
                 * the open_context which is not acquired until nfs_init_commit
                 * which has not been called on @data */
                WARN_ON_ONCE(data->context);
                nfs_commit_free(data);
                return true;
        }
        return false;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                             int how, struct nfs_commit_info *cinfo,
                             int (*initiate_commit)(struct nfs_commit_data *data,
                                                    int how))
{
        struct nfs_commit_data *data, *tmp;
        LIST_HEAD(list);
        unsigned int nreq = 0;

        if (!list_empty(mds_pages)) {
                data = nfs_commitdata_alloc(true);
                data->ds_commit_index = -1;
                list_add(&data->pages, &list);
                nreq++;
        }

        nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
        if (nreq == 0)
                goto out;

        atomic_add(nreq, &cinfo->mds->rpcs_out);

        list_for_each_entry_safe(data, tmp, &list, pages) {
                list_del_init(&data->pages);
                if (data->ds_commit_index < 0) {
                        /* another commit raced with us */
                        if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
                                                                      data, cinfo))
                                continue;

                        nfs_init_commit(data, mds_pages, NULL, cinfo);
                        nfs_initiate_commit(NFS_CLIENT(inode), data,
                                            NFS_PROTO(data->inode),
                                            data->mds_ops, how, 0);
                } else {
                        LIST_HEAD(pages);

                        pnfs_fetch_commit_bucket_list(&pages, data, cinfo);

                        /* another commit raced with us */
                        if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
                                                                      data, cinfo))
                                continue;

                        nfs_init_commit(data, &pages, data->lseg, cinfo);
                        initiate_commit(data, how);
                }
        }
out:
        return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
        if (ds == NULL) {
                printk(KERN_WARNING "%s NULL device\n", __func__);
                return;
        }
        printk(KERN_WARNING "        ds %s\n"
                "        ref count %d\n"
                "        client %p\n"
                "        cl_exchange_flags %x\n",
                ds->ds_remotestr,
                refcount_read(&ds->ds_count), ds->ds_clp,
                ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
        struct sockaddr_in *a, *b;
        struct sockaddr_in6 *a6, *b6;

        if (addr1->sa_family != addr2->sa_family)
                return false;

        switch (addr1->sa_family) {
        case AF_INET:
                a = (struct sockaddr_in *)addr1;
                b = (struct sockaddr_in *)addr2;

                if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
                    a->sin_port == b->sin_port)
                        return true;
                break;

        case AF_INET6:
                a6 = (struct sockaddr_in6 *)addr1;
                b6 = (struct sockaddr_in6 *)addr2;

                /* LINKLOCAL addresses must have matching scope_id */
                if (ipv6_addr_src_scope(&a6->sin6_addr) ==
                    IPV6_ADDR_SCOPE_LINKLOCAL &&
                    a6->sin6_scope_id != b6->sin6_scope_id)
                        return false;

                if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
                    a6->sin6_port == b6->sin6_port)
                        return true;
                break;

        default:
                dprintk("%s: unhandled address family: %u\n",
                        __func__, addr1->sa_family);
                return false;
        }

        return false;
}

/*
 * Checks if every address in 'dsaddrs1' is also present in 'dsaddrs2'
 * (i.e. 'dsaddrs1' is a subset of 'dsaddrs2'). If it is, declare a match.
 */
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
                               const struct list_head *dsaddrs2)
{
        struct nfs4_pnfs_ds_addr *da1, *da2;
        struct sockaddr *sa1, *sa2;
        bool match = false;

        list_for_each_entry(da1, dsaddrs1, da_node) {
                sa1 = (struct sockaddr *)&da1->da_addr;
                match = false;
                list_for_each_entry(da2, dsaddrs2, da_node) {
                        sa2 = (struct sockaddr *)&da2->da_addr;
                        match = same_sockaddr(sa1, sa2);
                        if (match)
                                break;
                }
                if (!match)
                        break;
        }
        return match;
}

/*
 * Lookup DS by addresses. nfs4_ds_cache_lock is held
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct list_head *dsaddrs)
{
        struct nfs4_pnfs_ds *ds;

        list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
                if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
                        return ds;
        return NULL;
}
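
/* Free a data server entry: drop its nfs_client reference and release
 * every address on its ds_addrs list.
 */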
static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
        struct nfs4_pnfs_ds_addr *da;

        dprintk("--> %s\n", __func__);
        ifdebug(FACILITY)
                print_ds(ds);

        nfs_put_client(ds->ds_clp);

        while (!list_empty(&ds->ds_addrs)) {
                da = list_first_entry(&ds->ds_addrs,
                                      struct nfs4_pnfs_ds_addr,
                                      da_node);
                list_del_init(&da->da_node);
                kfree(da->da_remotestr);
                kfree(da);
        }

        kfree(ds->ds_remotestr);
        kfree(ds);
}
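
/* Drop a reference to @ds; if it was the last one, unhash the entry from
 * the global cache and destroy it.
 */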
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
        if (refcount_dec_and_lock(&ds->ds_count,
                                  &nfs4_ds_cache_lock)) {
                list_del_init(&ds->ds_node);
                spin_unlock(&nfs4_ds_cache_lock);
                destroy_ds(ds);
        }
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
        struct nfs4_pnfs_ds_addr *da;
        char *remotestr;
        size_t len;
        char *p;

        len = 3;        /* '{', '}' and eol */
        list_for_each_entry(da, dsaddrs, da_node) {
                len += strlen(da->da_remotestr) + 1;    /* string plus comma */
        }

        remotestr = kzalloc(len, gfp_flags);
        if (!remotestr)
                return NULL;

        p = remotestr;
        *(p++) = '{';
        len--;
        list_for_each_entry(da, dsaddrs, da_node) {
                size_t ll = strlen(da->da_remotestr);

                if (ll > len)
                        goto out_err;

                memcpy(p, da->da_remotestr, ll);
                p += ll;
                len -= ll;

                if (len < 1)
                        goto out_err;
                (*p++) = ',';
                len--;
        }
        if (len < 2)
                goto out_err;
        *(p++) = '}';
        *p = '\0';
        return remotestr;

out_err:
        kfree(remotestr);
        return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
        struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
        char *remotestr;

        if (list_empty(dsaddrs)) {
                dprintk("%s: no addresses defined\n", __func__);
                goto out;
        }

        ds = kzalloc(sizeof(*ds), gfp_flags);
        if (!ds)
                goto out;

        /* this is only used for debugging, so it's ok if it's NULL */
        remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

        spin_lock(&nfs4_ds_cache_lock);
        tmp_ds = _data_server_lookup_locked(dsaddrs);
        if (tmp_ds == NULL) {
                INIT_LIST_HEAD(&ds->ds_addrs);
                list_splice_init(dsaddrs, &ds->ds_addrs);
                ds->ds_remotestr = remotestr;
                refcount_set(&ds->ds_count, 1);
                INIT_LIST_HEAD(&ds->ds_node);
                ds->ds_clp = NULL;
                list_add(&ds->ds_node, &nfs4_data_server_cache);
                dprintk("%s add new data server %s\n", __func__,
                        ds->ds_remotestr);
        } else {
                kfree(remotestr);
                kfree(ds);
                refcount_inc(&tmp_ds->ds_count);
                dprintk("%s data server %s found, inc'ed ds_count to %d\n",
                        __func__, tmp_ds->ds_remotestr,
                        refcount_read(&tmp_ds->ds_count));
                ds = tmp_ds;
        }
        spin_unlock(&nfs4_ds_cache_lock);
out:
        return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
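
/* Wait for another task to finish (or give up on) its connection attempt
 * to this data server.
 */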
static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
        might_sleep();
        wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
                    TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
        smp_mb__before_atomic();
        clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
        smp_mb__after_atomic();
        wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}
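
/* NFSv3 data server connections go through nfs3_set_ds_client(), which
 * lives in the NFSv3 module. The symbol is looked up at runtime via
 * symbol_request() so this code does not carry a hard dependency on
 * that module.
 */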
static struct nfs_client *(*get_v3_ds_connect)(
                        struct nfs_server *mds_srv,
                        const struct sockaddr *ds_addr,
                        int ds_addrlen,
                        int ds_proto,
                        unsigned int ds_timeo,
                        unsigned int ds_retrans);

static bool load_v3_ds_connect(void)
{
        if (!get_v3_ds_connect) {
                get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
                WARN_ON_ONCE(!get_v3_ds_connect);
        }

        return (get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
        if (get_v3_ds_connect) {
                symbol_put(nfs3_set_ds_client);
                get_v3_ds_connect = NULL;
        }
}
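
/* Connect to an NFSv3 data server: the first address that connects creates
 * the nfs_client, and any further addresses in the multipath list are added
 * to its rpc_clnt as transport aliases.
 */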
static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
                                    struct nfs4_pnfs_ds *ds,
                                    unsigned int timeo,
                                    unsigned int retrans)
{
        struct nfs_client *clp = ERR_PTR(-EIO);
        struct nfs4_pnfs_ds_addr *da;
        int status = 0;

        dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

        if (!load_v3_ds_connect())
                goto out;

        list_for_each_entry(da, &ds->ds_addrs, da_node) {
                dprintk("%s: DS %s: trying address %s\n",
                        __func__, ds->ds_remotestr, da->da_remotestr);

                if (!IS_ERR(clp)) {
                        struct xprt_create xprt_args = {
                                .ident = XPRT_TRANSPORT_TCP,
                                .net = clp->cl_net,
                                .dstaddr = (struct sockaddr *)&da->da_addr,
                                .addrlen = da->da_addrlen,
                                .servername = clp->cl_hostname,
                        };
                        /* Add this address as an alias */
                        rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
                                          rpc_clnt_test_and_add_xprt, NULL);
                } else
                        clp = get_v3_ds_connect(mds_srv,
                                        (struct sockaddr *)&da->da_addr,
                                        da->da_addrlen, IPPROTO_TCP,
                                        timeo, retrans);
        }

        if (IS_ERR(clp)) {
                status = PTR_ERR(clp);
                goto out;
        }

        smp_wmb();
        ds->ds_clp = clp;
        dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
        return status;
}
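
/* Connect to an NFSv4.x data server: the first address that yields a working
 * client and session creates the nfs_client; if the minor version supports
 * session trunking, the remaining addresses are tested and added as
 * transport aliases.
 */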
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
                                    struct nfs4_pnfs_ds *ds,
                                    unsigned int timeo,
                                    unsigned int retrans,
                                    u32 minor_version)
{
        struct nfs_client *clp = ERR_PTR(-EIO);
        struct nfs4_pnfs_ds_addr *da;
        int status = 0;

        dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

        list_for_each_entry(da, &ds->ds_addrs, da_node) {
                dprintk("%s: DS %s: trying address %s\n",
                        __func__, ds->ds_remotestr, da->da_remotestr);

                if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
                        struct xprt_create xprt_args = {
                                .ident = XPRT_TRANSPORT_TCP,
                                .net = clp->cl_net,
                                .dstaddr = (struct sockaddr *)&da->da_addr,
                                .addrlen = da->da_addrlen,
                                .servername = clp->cl_hostname,
                        };
                        struct nfs4_add_xprt_data xprtdata = {
                                .clp = clp,
                                .cred = nfs4_get_clid_cred(clp),
                        };
                        struct rpc_add_xprt_test rpcdata = {
                                .add_xprt_test = clp->cl_mvops->session_trunk,
                                .data = &xprtdata,
                        };

                        /*
                         * Test this address for session trunking and
                         * add as an alias
                         */
                        rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
                                          rpc_clnt_setup_test_and_add_xprt,
                                          &rpcdata);
                        if (xprtdata.cred)
                                put_rpccred(xprtdata.cred);
                } else {
                        clp = nfs4_set_ds_client(mds_srv,
                                        (struct sockaddr *)&da->da_addr,
                                        da->da_addrlen, IPPROTO_TCP,
                                        timeo, retrans, minor_version);
                        if (IS_ERR(clp))
                                continue;

                        status = nfs4_init_ds_session(clp,
                                        mds_srv->nfs_client->cl_lease_time);
                        if (status) {
                                nfs_put_client(clp);
                                clp = ERR_PTR(-EIO);
                                continue;
                        }
                }
        }

        if (IS_ERR(clp)) {
                status = PTR_ERR(clp);
                goto out;
        }

        smp_wmb();
        ds->ds_clp = clp;
        dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
        return status;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable and return a -errno.
 */
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
                         struct nfs4_deviceid_node *devid, unsigned int timeo,
                         unsigned int retrans, u32 version, u32 minor_version)
{
        int err;

again:
        err = 0;
        if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
                if (version == 3) {
                        err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
                                                       retrans);
                } else if (version == 4) {
                        err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
                                                       retrans, minor_version);
                } else {
                        dprintk("%s: unsupported DS version %d\n", __func__,
                                version);
                        err = -EPROTONOSUPPORT;
                }

                nfs4_clear_ds_conn_bit(ds);
        } else {
                nfs4_wait_ds_connect(ds);

                /* what was waited on didn't connect AND didn't mark unavail */
                if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid))
                        goto again;
        }

        /*
         * At this point the ds->ds_clp should be ready, but it might have
         * hit an error.
         */
        if (!err) {
                if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
                        WARN_ON_ONCE(ds->ds_clp ||
                                     !nfs4_test_deviceid_unavailable(devid));
                        return -EINVAL;
                }
                err = nfs_client_init_status(ds->ds_clp);
        }

        return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
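
/* Decode an XDR-encoded netaddr (r_netid + r_addr) into a newly allocated
 * nfs4_pnfs_ds_addr. The r_addr string ends in the port expressed as two
 * decimal octets ("...p1.p2", port = p1 * 256 + p2), which is converted
 * back into a binary port below.
 */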
/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
        struct nfs4_pnfs_ds_addr *da = NULL;
        char *buf, *portstr;
        __be16 port;
        int nlen, rlen;
        int tmp[2];
        __be32 *p;
        char *netid, *match_netid;
        size_t len, match_netid_len;
        char *startsep = "";
        char *endsep = "";

        /* r_netid */
        p = xdr_inline_decode(xdr, 4);
        if (unlikely(!p))
                goto out_err;
        nlen = be32_to_cpup(p++);

        p = xdr_inline_decode(xdr, nlen);
        if (unlikely(!p))
                goto out_err;

        netid = kmalloc(nlen + 1, gfp_flags);
        if (unlikely(!netid))
                goto out_err;

        netid[nlen] = '\0';
        memcpy(netid, p, nlen);

        /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
        p = xdr_inline_decode(xdr, 4);
        if (unlikely(!p))
                goto out_free_netid;
        rlen = be32_to_cpup(p);

        p = xdr_inline_decode(xdr, rlen);
        if (unlikely(!p))
                goto out_free_netid;

        /* port is ".ABC.DEF", 8 chars max */
        if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
                dprintk("%s: Invalid address, length %d\n", __func__,
                        rlen);
                goto out_free_netid;
        }
        buf = kmalloc(rlen + 1, gfp_flags);
        if (!buf) {
                dprintk("%s: Not enough memory\n", __func__);
                goto out_free_netid;
        }
        buf[rlen] = '\0';
        memcpy(buf, p, rlen);

        /* replace port '.' with '-' */
        portstr = strrchr(buf, '.');
        if (!portstr) {
                dprintk("%s: Failed finding expected dot in port\n",
                        __func__);
                goto out_free_buf;
        }
        *portstr = '-';

        /* find '.' between address and port */
        portstr = strrchr(buf, '.');
        if (!portstr) {
                dprintk("%s: Failed finding expected dot between address and "
                        "port\n", __func__);
                goto out_free_buf;
        }
        *portstr = '\0';

        da = kzalloc(sizeof(*da), gfp_flags);
        if (unlikely(!da))
                goto out_free_buf;

        INIT_LIST_HEAD(&da->da_node);

        if (!rpc_pton(net, buf, portstr - buf, (struct sockaddr *)&da->da_addr,
                      sizeof(da->da_addr))) {
                dprintk("%s: error parsing address %s\n", __func__, buf);
                goto out_free_da;
        }

        portstr++;
        sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
        port = htons((tmp[0] << 8) | (tmp[1]));

        switch (da->da_addr.ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
                da->da_addrlen = sizeof(struct sockaddr_in);
                match_netid = "tcp";
                match_netid_len = 3;
                break;

        case AF_INET6:
                ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
                da->da_addrlen = sizeof(struct sockaddr_in6);
                match_netid = "tcp6";
                match_netid_len = 4;
                startsep = "[";
                endsep = "]";
                break;

        default:
                dprintk("%s: unsupported address family: %u\n",
                        __func__, da->da_addr.ss_family);
                goto out_free_da;
        }

        if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
                dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
                        __func__, netid, match_netid);
                goto out_free_da;
        }

        /* save human readable address */
        len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
        da->da_remotestr = kzalloc(len, gfp_flags);

        /* NULL is ok, only used for dprintk */
        if (da->da_remotestr)
                snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
                         buf, endsep, ntohs(port));

        dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
        kfree(buf);
        kfree(netid);
        return da;

out_free_da:
        kfree(da);
out_free_buf:
        dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
        kfree(buf);
out_free_netid:
        kfree(netid);
out_err:
        return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
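
/* Add @req to the written list of the commit bucket selected by
 * @ds_commit_idx, taking an lseg reference for the bucket when it goes
 * from empty to non-empty.
 */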
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
                                struct pnfs_layout_segment *lseg,
                                struct nfs_commit_info *cinfo,
                                u32 ds_commit_idx)
{
        struct list_head *list;
        struct pnfs_commit_bucket *buckets;

        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        buckets = cinfo->ds->buckets;
        list = &buckets[ds_commit_idx].written;
        if (list_empty(list)) {
                if (!pnfs_is_valid_lseg(lseg)) {
                        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
                        cinfo->completion_ops->resched_write(cinfo, req);
                        return;
                }
                /* Non-empty buckets hold a reference on the lseg. That ref
                 * is normally transferred to the COMMIT call and released
                 * there. It could also be released if the last req is pulled
                 * off due to a rewrite, in which case it will be done in
                 * pnfs_generic_clear_request_commit
                 */
                WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
                buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
        }
        set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
        cinfo->ds->nwritten++;

        nfs_request_add_commit_list_locked(req, list, cinfo);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
        nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
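
/* If a layoutcommit is outstanding, flush commits synchronously and, unless
 * this is a datasync, follow up with a layoutcommit to the MDS.
 */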
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
        int ret;

        if (!pnfs_layoutcommit_outstanding(inode))
                return 0;
        ret = nfs_commit_inode(inode, FLUSH_SYNC);
        if (ret < 0)
                return ret;
        if (datasync)
                return 0;
        return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);