fcloop.c

/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
        NVMF_OPT_ERR = 0,
        NVMF_OPT_WWNN = 1 << 0,
        NVMF_OPT_WWPN = 1 << 1,
        NVMF_OPT_ROLES = 1 << 2,
        NVMF_OPT_FCADDR = 1 << 3,
        NVMF_OPT_LPWWNN = 1 << 4,
        NVMF_OPT_LPWWPN = 1 << 5,
};

struct fcloop_ctrl_options {
        int mask;
        u64 wwnn;
        u64 wwpn;
        u32 roles;
        u32 fcaddr;
        u64 lpwwnn;
        u64 lpwwpn;
};

static const match_table_t opt_tokens = {
        { NVMF_OPT_WWNN, "wwnn=%s" },
        { NVMF_OPT_WWPN, "wwpn=%s" },
        { NVMF_OPT_ROLES, "roles=%d" },
        { NVMF_OPT_FCADDR, "fcaddr=%x" },
        { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
        { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
        { NVMF_OPT_ERR, NULL }
};
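
/*
 * Option strings are comma-separated key=value lists. As an illustration
 * (the WWN values here are made up): a local port needs at least
 * "wwnn=0x10001000,wwpn=0x20002000", while a remote port additionally
 * names the local port it loops back to via lpwwnn=/lpwwpn=, e.g.
 * "wwnn=0x10001100,wwpn=0x20002100,lpwwnn=0x10001000,lpwwpn=0x20002000".
 */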

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                opts->mask |= token;
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwnn = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwpn = token64;
                        break;
                case NVMF_OPT_ROLES:
                        if (match_int(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->roles = token;
                        break;
                case NVMF_OPT_FCADDR:
                        if (match_hex(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->fcaddr = token;
                        break;
                case NVMF_OPT_LPWWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwnn = token64;
                        break;
                case NVMF_OPT_LPWWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwpn = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);
        return ret;
}

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        *nname = -1;
        *pname = -1;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *nname = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *pname = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);

        if (!ret) {
                if (*nname == -1)
                        return -EINVAL;
                if (*pname == -1)
                        return -EINVAL;
        }

        return ret;
}

#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
        struct nvme_fc_local_port *localport;
        struct list_head lport_list;
        struct completion unreg_done;
};

struct fcloop_lport_priv {
        struct fcloop_lport *lport;
};

struct fcloop_rport {
        struct nvme_fc_remote_port *remoteport;
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_tport {
        struct nvmet_fc_target_port *targetport;
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_nport {
        struct fcloop_rport *rport;
        struct fcloop_tport *tport;
        struct fcloop_lport *lport;
        struct list_head nport_list;
        struct kref ref;
        u64 node_name;
        u64 port_name;
        u32 port_role;
        u32 port_id;
};

struct fcloop_lsreq {
        struct fcloop_tport *tport;
        struct nvmefc_ls_req *lsreq;
        struct work_struct work;
        struct nvmefc_tgt_ls_req tgt_ls_req;
        int status;
};

enum {
        INI_IO_START = 0,
        INI_IO_ACTIVE = 1,
        INI_IO_ABORTED = 2,
        INI_IO_COMPLETED = 3,
};
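
/*
 * Initiator-side i/o state, kept in fcloop_fcpreq->inistate:
 * START -> ACTIVE once fcloop_fcp_recv_work() hands the command to the
 * target transport; START/ACTIVE -> ABORTED when fcloop_fcp_abort() runs
 * first; and -> COMPLETED in fcloop_tgt_fcprqst_done_work(), after which
 * aborts become no-ops.
 */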

struct fcloop_fcpreq {
        struct fcloop_tport *tport;
        struct nvmefc_fcp_req *fcpreq;
        spinlock_t reqlock;
        u16 status;
        u32 inistate;
        bool active;
        bool aborted;
        struct kref ref;
        struct work_struct fcp_rcv_work;
        struct work_struct abort_rcv_work;
        struct work_struct tio_done_work;
        struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req *fcpreq;
        struct fcloop_fcpreq *tfcp_req;
        spinlock_t inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
                unsigned int qidx, u16 qsize,
                void **handle)
{
        *handle = localport;
        return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
                unsigned int idx, void *handle)
{
}


/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
        struct fcloop_lsreq *tls_req =
                container_of(work, struct fcloop_lsreq, work);
        struct fcloop_tport *tport = tls_req->tport;
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        if (!tport || tport->remoteport)
                lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                struct nvmefc_ls_req *lsreq)
{
        struct fcloop_lsreq *tls_req = lsreq->private;
        struct fcloop_rport *rport = remoteport->private;
        int ret = 0;

        tls_req->lsreq = lsreq;
        INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
                tls_req->tport = NULL;
                schedule_work(&tls_req->work);
                return ret;
        }

        tls_req->status = 0;
        tls_req->tport = rport->targetport->private;
        ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
                                lsreq->rqstaddr, lsreq->rqstlen);

        return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
                struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
                ((lsreq->rsplen < tgt_lsreq->rsplen) ?
                                lsreq->rsplen : tgt_lsreq->rsplen));
        tgt_lsreq->done(tgt_lsreq);

        schedule_work(&tls_req->work);

        return 0;
}
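
/*
 * fcloop_fcpreq lifetime is kref-managed: kref_init() in fcloop_fcp_req()
 * holds the original i/o reference, released via fcloop_call_host_done();
 * fcloop_fcp_abort() takes an extra reference that the abort path drops
 * once the abort has been delivered (or immediately, if the i/o had
 * already completed).
 */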

static void
fcloop_tfcp_req_free(struct kref *ref)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(ref, struct fcloop_fcpreq, ref);

        kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
        kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
        return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
                struct fcloop_fcpreq *tfcp_req, int status)
{
        struct fcloop_ini_fcpreq *inireq = NULL;

        if (fcpreq) {
                inireq = fcpreq->private;
                spin_lock(&inireq->inilock);
                inireq->tfcp_req = NULL;
                spin_unlock(&inireq->inilock);

                fcpreq->status = status;
                fcpreq->done(fcpreq);
        }

        /* release original io reference on tgt struct */
        fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        int ret = 0;
        bool aborted = false;

        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
                tfcp_req->inistate = INI_IO_ACTIVE;
                break;
        case INI_IO_ABORTED:
                aborted = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(aborted))
                ret = -ECANCELED;
        else
                ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                                &tfcp_req->tgt_fcp_req,
                                fcpreq->cmdaddr, fcpreq->cmdlen);
        if (ret)
                fcloop_call_host_done(fcpreq, tfcp_req, ret);

        return;
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        bool completed = false;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
                break;
        case INI_IO_COMPLETED:
                completed = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(completed)) {
                /* remove reference taken in original abort downcall */
                fcloop_tfcp_req_put(tfcp_req);
                return;
        }

        if (tfcp_req->tport->targetport)
                nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                        &tfcp_req->tgt_fcp_req);

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->fcpreq = NULL;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
        /* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->inistate = INI_IO_COMPLETED;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                void *hw_queue_handle,
                struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
        if (!tfcp_req)
                return -ENOMEM;

        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
        spin_lock_init(&inireq->inilock);

        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        tfcp_req->inistate = INI_IO_START;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
        INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);

        schedule_work(&tfcp_req->fcp_rcv_work);

        return 0;
}
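
/*
 * Copy between the target-side sgl and the initiator i/o sgl. The first
 * loop walks io_sg forward past 'offset' bytes; the second copies 'length'
 * bytes in chunks of min(io_len, data_len, length), stepping to the next
 * scatterlist element whenever the current one is exhausted.
 */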

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
                struct scatterlist *io_sg, u32 offset, u32 length)
{
        void *data_p, *io_p;
        u32 data_len, io_len, tlen;

        io_p = sg_virt(io_sg);
        io_len = io_sg->length;

        for ( ; offset; ) {
                tlen = min_t(u32, offset, io_len);
                offset -= tlen;
                io_len -= tlen;
                if (!io_len) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;
        }

        data_p = sg_virt(data_sg);
        data_len = data_sg->length;

        for ( ; length; ) {
                tlen = min_t(u32, io_len, data_len);
                tlen = min_t(u32, tlen, length);

                if (op == NVMET_FCOP_WRITEDATA)
                        memcpy(data_p, io_p, tlen);
                else
                        memcpy(io_p, data_p, tlen);

                length -= tlen;

                io_len -= tlen;
                if ((!io_len) && (length)) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;

                data_len -= tlen;
                if ((!data_len) && (length)) {
                        data_sg = sg_next(data_sg);
                        data_p = sg_virt(data_sg);
                        data_len = data_sg->length;
                } else
                        data_p += tlen;
        }
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
        int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(active))
                /* illegal - call while i/o active */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target transport has aborted i/o prior */
                spin_lock(&tfcp_req->reqlock);
                tfcp_req->active = false;
                spin_unlock(&tfcp_req->reqlock);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /*
         * if fcpreq is NULL, the I/O has been aborted (from
         * initiator side). For the target side, act as if all is well
         * but don't actually move data.
         */

        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                break;

        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                if (op == NVMET_FCOP_READDATA)
                        break;

                /* Fall-Thru to RSP handling */
                /* FALLTHRU */

        case NVMET_FCOP_RSP:
                if (fcpreq) {
                        rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
                                        fcpreq->rsplen : tgt_fcpreq->rsplen);
                        memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
                        if (rsplen < tgt_fcpreq->rsplen)
                                fcp_err = -E2BIG;
                        fcpreq->rcv_rsplen = rsplen;
                        fcpreq->status = 0;
                }
                tfcp_req->status = 0;
                break;

        default:
                fcp_err = -EINVAL;
                break;
        }

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->active = false;
        spin_unlock(&tfcp_req->reqlock);

        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

        return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        /*
         * mark aborted only in case there were 2 threads in transport
         * (one doing io, other doing abort) and only kills ops posted
         * after the abort request
         */
        spin_lock(&tfcp_req->reqlock);
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);

        tfcp_req->status = NVME_SC_INTERNAL;

        /*
         * nothing more to do. If io wasn't active, the transport should
         * immediately call the req_release. If it was active, the op
         * will complete, and the lldd should call req_release.
         */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                void *hw_queue_handle,
                struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        bool abortio = true;

        spin_lock(&inireq->inilock);
        tfcp_req = inireq->tfcp_req;
        if (tfcp_req)
                fcloop_tfcp_req_get(tfcp_req);
        spin_unlock(&inireq->inilock);

        if (!tfcp_req)
                /* abort has already been called */
                return;

        /* break initiator/target relationship for io */
        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
        case INI_IO_ACTIVE:
                tfcp_req->inistate = INI_IO_ABORTED;
                break;
        case INI_IO_COMPLETED:
                abortio = false;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (abortio)
                /* leave the reference while the work item is scheduled */
                WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
        else {
                /*
                 * as the io has already had the done callback made,
                 * nothing more to do. So release the reference taken above
                 */
                fcloop_tfcp_req_put(tfcp_req);
        }
}

static void
fcloop_nport_free(struct kref *ref)
{
        struct fcloop_nport *nport =
                container_of(ref, struct fcloop_nport, ref);
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&nport->nport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
        kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
        return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
        struct fcloop_lport_priv *lport_priv = localport->private;
        struct fcloop_lport *lport = lport_priv->lport;

        /* release any threads waiting for the unreg to complete */
        complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct fcloop_rport *rport = remoteport->private;

        fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct fcloop_tport *tport = targetport->private;

        fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES        4
#define FCLOOP_SGL_SEGS         256
#define FCLOOP_DMABOUND_4G      0xFFFFFFFF
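
/*
 * fcloop plays both ends of the link in one module: fctemplate registers
 * the loopback "HBA" with the NVMe-FC host transport, while tgttemplate
 * registers it with the NVMe-FC target transport.
 */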

static struct nvme_fc_port_template fctemplate = {
        .localport_delete = fcloop_localport_delete,
        .remoteport_delete = fcloop_remoteport_delete,
        .create_queue = fcloop_create_queue,
        .delete_queue = fcloop_delete_queue,
        .ls_req = fcloop_ls_req,
        .fcp_io = fcloop_fcp_req,
        .ls_abort = fcloop_ls_abort,
        .fcp_abort = fcloop_fcp_abort,
        .max_hw_queues = FCLOOP_HW_QUEUES,
        .max_sgl_segments = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
        .dma_boundary = FCLOOP_DMABOUND_4G,
        /* sizes of additional private data for data structures */
        .local_priv_sz = sizeof(struct fcloop_lport_priv),
        .remote_priv_sz = sizeof(struct fcloop_rport),
        .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
        .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete = fcloop_targetport_delete,
        .xmt_ls_rsp = fcloop_xmt_ls_rsp,
        .fcp_op = fcloop_fcp_op,
        .fcp_abort = fcloop_tgt_fcp_abort,
        .fcp_req_release = fcloop_fcp_req_release,
        .max_hw_queues = FCLOOP_HW_QUEUES,
        .max_sgl_segments = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
        .dma_boundary = FCLOOP_DMABOUND_4G,
        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_port_info pinfo;
        struct fcloop_ctrl_options *opts;
        struct nvme_fc_local_port *localport;
        struct fcloop_lport *lport;
        struct fcloop_lport_priv *lport_priv;
        unsigned long flags;
        int ret = -ENOMEM;

        lport = kzalloc(sizeof(*lport), GFP_KERNEL);
        if (!lport)
                return -ENOMEM;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                goto out_free_lport;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
        pinfo.port_id = opts->fcaddr;

        ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
        if (!ret) {
                /* success */
                lport_priv = localport->private;
                lport_priv->lport = lport;

                lport->localport = localport;
                INIT_LIST_HEAD(&lport->lport_list);

                spin_lock_irqsave(&fcloop_lock, flags);
                list_add_tail(&lport->lport_list, &fcloop_lports);
                spin_unlock_irqrestore(&fcloop_lock, flags);
        }

out_free_opts:
        kfree(opts);
out_free_lport:
        /* free only if we're going to fail */
        if (ret)
                kfree(lport);

        return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
        list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
        int ret;

        init_completion(&lport->unreg_done);

        ret = nvme_fc_unregister_localport(lport->localport);

        wait_for_completion(&lport->unreg_done);

        kfree(lport);

        return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_lport *tlport, *lport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tlport, &fcloop_lports, lport_list) {
                if (tlport->localport->node_name == nodename &&
                    tlport->localport->port_name == portname) {
                        lport = tlport;
                        __unlink_local_port(lport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!lport)
                return -ENOENT;

        ret = __wait_localport_unreg(lport);

        return ret ? ret : count;
}
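
/*
 * An fcloop_nport describes the shared end of the loopback link: the same
 * wwnn/wwpn pair that the host side registers as a remote port and the
 * target side registers as a target port. fcloop_alloc_nport() reuses an
 * existing nport when the wwnn/wwpn match, and the rport/tport fields are
 * filled in as each side is created.
 */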

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
        struct fcloop_nport *newnport, *nport = NULL;
        struct fcloop_lport *tmplport, *lport = NULL;
        struct fcloop_ctrl_options *opts;
        unsigned long flags;
        u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
        int ret;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                return NULL;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & opts_mask) != opts_mask) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
        if (!newnport)
                goto out_free_opts;

        INIT_LIST_HEAD(&newnport->nport_list);
        newnport->node_name = opts->wwnn;
        newnport->port_name = opts->wwpn;
        if (opts->mask & NVMF_OPT_ROLES)
                newnport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                newnport->port_id = opts->fcaddr;
        kref_init(&newnport->ref);

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
                if (tmplport->localport->node_name == opts->wwnn &&
                    tmplport->localport->port_name == opts->wwpn)
                        goto out_invalid_opts;

                if (tmplport->localport->node_name == opts->lpwwnn &&
                    tmplport->localport->port_name == opts->lpwwpn)
                        lport = tmplport;
        }

        if (remoteport) {
                if (!lport)
                        goto out_invalid_opts;
                newnport->lport = lport;
        }

        list_for_each_entry(nport, &fcloop_nports, nport_list) {
                if (nport->node_name == opts->wwnn &&
                    nport->port_name == opts->wwpn) {
                        if ((remoteport && nport->rport) ||
                            (!remoteport && nport->tport)) {
                                nport = NULL;
                                goto out_invalid_opts;
                        }

                        fcloop_nport_get(nport);

                        spin_unlock_irqrestore(&fcloop_lock, flags);

                        if (remoteport)
                                nport->lport = lport;
                        if (opts->mask & NVMF_OPT_ROLES)
                                nport->port_role = opts->roles;
                        if (opts->mask & NVMF_OPT_FCADDR)
                                nport->port_id = opts->fcaddr;
                        goto out_free_newnport;
                }
        }

        list_add_tail(&newnport->nport_list, &fcloop_nports);

        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(opts);
        return newnport;

out_invalid_opts:
        spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
        kfree(newnport);
out_free_opts:
        kfree(opts);
        return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        struct nvme_fc_port_info pinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, true);
        if (!nport)
                return -EIO;

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
        pinfo.port_id = nport->port_id;

        ret = nvme_fc_register_remoteport(nport->lport->localport,
                                                &pinfo, &remoteport);
        if (ret || !remoteport) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
        if (nport->tport) {
                nport->tport->remoteport = remoteport;
                nport->tport->lport = nport->lport;
        }
        rport->nport = nport;
        rport->lport = nport->lport;
        nport->rport = rport;

        return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
        struct fcloop_rport *rport = nport->rport;

        if (rport && nport->tport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;

        return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
        if (!rport)
                return -EALREADY;

        return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_rport *rport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->rport) {
                        nport = tmpport;
                        rport = __unlink_remote_port(nport);
                        break;
                }
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __remoteport_unreg(nport, rport);

        return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct nvmet_fc_port_info tinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, false);
        if (!nport)
                return -EIO;

        tinfo.node_name = nport->node_name;
        tinfo.port_name = nport->port_name;
        tinfo.port_id = nport->port_id;

        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
                                                &targetport);
        if (ret) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        tport = targetport->private;
        tport->targetport = targetport;
        tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
        if (nport->rport)
                nport->rport->targetport = targetport;
        tport->nport = nport;
        tport->lport = nport->lport;
        nport->tport = tport;

        return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
        struct fcloop_tport *tport = nport->tport;

        if (tport && nport->rport)
                nport->rport->targetport = NULL;
        nport->tport = NULL;

        return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
        if (!tport)
                return -EALREADY;

        return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_tport *tport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->tport) {
                        nport = tmpport;
                        tport = __unlink_target_port(nport);
                        break;
                }
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __targetport_unreg(nport, tport);

        return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
        &dev_attr_add_local_port.attr,
        &dev_attr_del_local_port.attr,
        &dev_attr_add_remote_port.attr,
        &dev_attr_del_remote_port.attr,
        &dev_attr_add_target_port.attr,
        &dev_attr_del_target_port.attr,
        NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
        .attrs = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
        &fcloop_dev_attrs_group,
        NULL,
};
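
/*
 * The attributes above surface as write-only files under the "ctl" device,
 * normally /sys/class/fcloop/ctl/. A minimal setup sketch (the WWN values
 * are hypothetical):
 *
 *      echo "wwnn=0x10001000,wwpn=0x20002000" > \
 *              /sys/class/fcloop/ctl/add_local_port
 *      echo "wwnn=0x10001100,wwpn=0x20002100" > \
 *              /sys/class/fcloop/ctl/add_target_port
 *      echo "wwnn=0x10001100,wwpn=0x20002100,lpwwnn=0x10001000,lpwwpn=0x20002000" > \
 *              /sys/class/fcloop/ctl/add_remote_port
 */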

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
        int ret;

        fcloop_class = class_create(THIS_MODULE, "fcloop");
        if (IS_ERR(fcloop_class)) {
                pr_err("couldn't register class fcloop\n");
                ret = PTR_ERR(fcloop_class);
                return ret;
        }

        fcloop_device = device_create_with_groups(
                                fcloop_class, NULL, MKDEV(0, 0), NULL,
                                fcloop_dev_attr_groups, "ctl");
        if (IS_ERR(fcloop_device)) {
                pr_err("couldn't create ctl device!\n");
                ret = PTR_ERR(fcloop_device);
                goto out_destroy_class;
        }

        get_device(fcloop_device);

        return 0;

out_destroy_class:
        class_destroy(fcloop_class);
        return ret;
}

static void __exit fcloop_exit(void)
{
        struct fcloop_lport *lport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        for (;;) {
                nport = list_first_entry_or_null(&fcloop_nports,
                                                typeof(*nport), nport_list);
                if (!nport)
                        break;

                tport = __unlink_target_port(nport);
                rport = __unlink_remote_port(nport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __targetport_unreg(nport, tport);
                if (ret)
                        pr_warn("%s: Failed deleting target port\n", __func__);

                ret = __remoteport_unreg(nport, rport);
                if (ret)
                        pr_warn("%s: Failed deleting remote port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        for (;;) {
                lport = list_first_entry_or_null(&fcloop_lports,
                                                typeof(*lport), lport_list);
                if (!lport)
                        break;

                __unlink_local_port(lport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __wait_localport_unreg(lport);
                if (ret)
                        pr_warn("%s: Failed deleting local port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        put_device(fcloop_device);

        device_destroy(fcloop_class, MKDEV(0, 0));
        class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");