/* nvmet.h - NVMe over Fabrics target: common definitions */
  1. /*
  2. * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. */
  13. #ifndef _NVMET_H
  14. #define _NVMET_H
  15. #include <linux/dma-mapping.h>
  16. #include <linux/types.h>
  17. #include <linux/device.h>
  18. #include <linux/kref.h>
  19. #include <linux/percpu-refcount.h>
  20. #include <linux/list.h>
  21. #include <linux/mutex.h>
  22. #include <linux/uuid.h>
  23. #include <linux/nvme.h>
  24. #include <linux/configfs.h>
  25. #include <linux/rcupdate.h>
  26. #include <linux/blkdev.h>
  27. #define NVMET_ASYNC_EVENTS 4
  28. #define NVMET_ERROR_LOG_SLOTS 128
  29. /*
  30. * Supported optional AENs:
  31. */
  32. #define NVMET_AEN_CFG_OPTIONAL \
  33. (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
  34. /*
  35. * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
  36. */
  37. #define NVMET_AEN_CFG_ALL \
  38. (NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
  39. NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
  40. NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
  41. /* Helper Macros when NVMe error is NVME_SC_CONNECT_INVALID_PARAM
  42. * The 16 bit shift is to set IATTR bit to 1, which means offending
  43. * offset starts in the data section of connect()
  44. */
  45. #define IPO_IATTR_CONNECT_DATA(x) \
  46. (cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
  47. #define IPO_IATTR_CONNECT_SQE(x) \
  48. (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
  49. struct nvmet_ns {
  50. struct list_head dev_link;
  51. struct percpu_ref ref;
  52. struct block_device *bdev;
  53. struct file *file;
  54. bool readonly;
  55. u32 nsid;
  56. u32 blksize_shift;
  57. loff_t size;
  58. u8 nguid[16];
  59. uuid_t uuid;
  60. u32 anagrpid;
  61. bool buffered_io;
  62. bool enabled;
  63. struct nvmet_subsys *subsys;
  64. const char *device_path;
  65. struct config_group device_group;
  66. struct config_group group;
  67. struct completion disable_done;
  68. mempool_t *bvec_pool;
  69. struct kmem_cache *bvec_cache;
  70. };
  71. static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
  72. {
  73. return container_of(to_config_group(item), struct nvmet_ns, group);
  74. }
  75. struct nvmet_cq {
  76. u16 qid;
  77. u16 size;
  78. };
  79. struct nvmet_sq {
  80. struct nvmet_ctrl *ctrl;
  81. struct percpu_ref ref;
  82. u16 qid;
  83. u16 size;
  84. u32 sqhd;
  85. struct completion free_done;
  86. struct completion confirm_done;
  87. };
  88. struct nvmet_ana_group {
  89. struct config_group group;
  90. struct nvmet_port *port;
  91. u32 grpid;
  92. };
  93. static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
  94. {
  95. return container_of(to_config_group(item), struct nvmet_ana_group,
  96. group);
  97. }
  98. /**
  99. * struct nvmet_port - Common structure to keep port
  100. * information for the target.
  101. * @entry: Entry into referrals or transport list.
  102. * @disc_addr: Address information is stored in a format defined
  103. * for a discovery log page entry.
  104. * @group: ConfigFS group for this element's folder.
  105. * @priv: Private data for the transport.
  106. */
  107. struct nvmet_port {
  108. struct list_head entry;
  109. struct nvmf_disc_rsp_page_entry disc_addr;
  110. struct config_group group;
  111. struct config_group subsys_group;
  112. struct list_head subsystems;
  113. struct config_group referrals_group;
  114. struct list_head referrals;
  115. struct config_group ana_groups_group;
  116. struct nvmet_ana_group ana_default_group;
  117. enum nvme_ana_state *ana_state;
  118. void *priv;
  119. bool enabled;
  120. int inline_data_size;
  121. };
  122. static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
  123. {
  124. return container_of(to_config_group(item), struct nvmet_port,
  125. group);
  126. }
  127. static inline struct nvmet_port *ana_groups_to_port(
  128. struct config_item *item)
  129. {
  130. return container_of(to_config_group(item), struct nvmet_port,
  131. ana_groups_group);
  132. }
  133. struct nvmet_ctrl {
  134. struct nvmet_subsys *subsys;
  135. struct nvmet_cq **cqs;
  136. struct nvmet_sq **sqs;
  137. struct mutex lock;
  138. u64 cap;
  139. u32 cc;
  140. u32 csts;
  141. uuid_t hostid;
  142. u16 cntlid;
  143. u32 kato;
  144. struct nvmet_port *port;
  145. u32 aen_enabled;
  146. unsigned long aen_masked;
  147. struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
  148. unsigned int nr_async_event_cmds;
  149. struct list_head async_events;
  150. struct work_struct async_event_work;
  151. struct list_head subsys_entry;
  152. struct kref ref;
  153. struct delayed_work ka_work;
  154. struct work_struct fatal_err_work;
  155. const struct nvmet_fabrics_ops *ops;
  156. __le32 *changed_ns_list;
  157. u32 nr_changed_ns;
  158. char subsysnqn[NVMF_NQN_FIELD_LEN];
  159. char hostnqn[NVMF_NQN_FIELD_LEN];
  160. };
  161. struct nvmet_subsys {
  162. enum nvme_subsys_type type;
  163. struct mutex lock;
  164. struct kref ref;
  165. struct list_head namespaces;
  166. unsigned int nr_namespaces;
  167. unsigned int max_nsid;
  168. struct list_head ctrls;
  169. struct list_head hosts;
  170. bool allow_any_host;
  171. u16 max_qid;
  172. u64 ver;
  173. u64 serial;
  174. char *subsysnqn;
  175. struct config_group group;
  176. struct config_group namespaces_group;
  177. struct config_group allowed_hosts_group;
  178. };
  179. static inline struct nvmet_subsys *to_subsys(struct config_item *item)
  180. {
  181. return container_of(to_config_group(item), struct nvmet_subsys, group);
  182. }
  183. static inline struct nvmet_subsys *namespaces_to_subsys(
  184. struct config_item *item)
  185. {
  186. return container_of(to_config_group(item), struct nvmet_subsys,
  187. namespaces_group);
  188. }
  189. struct nvmet_host {
  190. struct config_group group;
  191. };
  192. static inline struct nvmet_host *to_host(struct config_item *item)
  193. {
  194. return container_of(to_config_group(item), struct nvmet_host, group);
  195. }
  196. static inline char *nvmet_host_name(struct nvmet_host *host)
  197. {
  198. return config_item_name(&host->group.cg_item);
  199. }
  200. struct nvmet_host_link {
  201. struct list_head entry;
  202. struct nvmet_host *host;
  203. };
  204. struct nvmet_subsys_link {
  205. struct list_head entry;
  206. struct nvmet_subsys *subsys;
  207. };
  208. struct nvmet_req;
  209. struct nvmet_fabrics_ops {
  210. struct module *owner;
  211. unsigned int type;
  212. unsigned int msdbd;
  213. bool has_keyed_sgls : 1;
  214. void (*queue_response)(struct nvmet_req *req);
  215. int (*add_port)(struct nvmet_port *port);
  216. void (*remove_port)(struct nvmet_port *port);
  217. void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
  218. void (*disc_traddr)(struct nvmet_req *req,
  219. struct nvmet_port *port, char *traddr);
  220. };
  221. #define NVMET_MAX_INLINE_BIOVEC 8
  222. struct nvmet_req {
  223. struct nvme_command *cmd;
  224. struct nvme_completion *rsp;
  225. struct nvmet_sq *sq;
  226. struct nvmet_cq *cq;
  227. struct nvmet_ns *ns;
  228. struct scatterlist *sg;
  229. struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
  230. union {
  231. struct {
  232. struct bio inline_bio;
  233. } b;
  234. struct {
  235. bool mpool_alloc;
  236. struct kiocb iocb;
  237. struct bio_vec *bvec;
  238. struct work_struct work;
  239. } f;
  240. };
  241. int sg_cnt;
  242. /* data length as parsed from the command: */
  243. size_t data_len;
  244. /* data length as parsed from the SGL descriptor: */
  245. size_t transfer_len;
  246. struct nvmet_port *port;
  247. void (*execute)(struct nvmet_req *req);
  248. const struct nvmet_fabrics_ops *ops;
  249. };
  250. extern struct workqueue_struct *buffered_io_wq;
  251. static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
  252. {
  253. req->rsp->status = cpu_to_le16(status << 1);
  254. }
  255. static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
  256. {
  257. req->rsp->result.u32 = cpu_to_le32(result);
  258. }
  259. /*
  260. * NVMe command writes actually are DMA reads for us on the target side.
  261. */
  262. static inline enum dma_data_direction
  263. nvmet_data_dir(struct nvmet_req *req)
  264. {
  265. return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  266. }
  267. struct nvmet_async_event {
  268. struct list_head entry;
  269. u8 event_type;
  270. u8 event_info;
  271. u8 log_page;
  272. };
  273. u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
  274. u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
  275. u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
  276. u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
  277. u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
  278. u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
  279. bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
  280. struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
  281. void nvmet_req_uninit(struct nvmet_req *req);
  282. void nvmet_req_execute(struct nvmet_req *req);
  283. void nvmet_req_complete(struct nvmet_req *req, u16 status);
  284. void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
  285. u16 size);
  286. void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
  287. u16 size);
  288. void nvmet_sq_destroy(struct nvmet_sq *sq);
  289. int nvmet_sq_init(struct nvmet_sq *sq);
  290. void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
  291. void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
  292. u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
  293. struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
  294. u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
  295. struct nvmet_req *req, struct nvmet_ctrl **ret);
  296. void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
  297. u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
  298. struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
  299. enum nvme_subsys_type type);
  300. void nvmet_subsys_put(struct nvmet_subsys *subsys);
  301. void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
  302. struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
  303. void nvmet_put_namespace(struct nvmet_ns *ns);
  304. int nvmet_ns_enable(struct nvmet_ns *ns);
  305. void nvmet_ns_disable(struct nvmet_ns *ns);
  306. struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
  307. void nvmet_ns_free(struct nvmet_ns *ns);
  308. void nvmet_send_ana_event(struct nvmet_subsys *subsys,
  309. struct nvmet_port *port);
  310. void nvmet_port_send_ana_event(struct nvmet_port *port);
  311. int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
  312. void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
  313. int nvmet_enable_port(struct nvmet_port *port);
  314. void nvmet_disable_port(struct nvmet_port *port);
  315. void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
  316. void nvmet_referral_disable(struct nvmet_port *port);
  317. u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
  318. size_t len);
  319. u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
  320. size_t len);
  321. u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
  322. u32 nvmet_get_log_page_len(struct nvme_command *cmd);
  323. #define NVMET_QUEUE_SIZE 1024
  324. #define NVMET_NR_QUEUES 128
  325. #define NVMET_MAX_CMD NVMET_QUEUE_SIZE
  326. /*
  327. * Nice round number that makes a list of nsids fit into a page.
  328. * Should become tunable at some point in the future.
  329. */
  330. #define NVMET_MAX_NAMESPACES 1024
  331. /*
  332. * 0 is not a valid ANA group ID, so we start numbering at 1.
  333. *
  334. * ANA Group 1 exists without manual intervention, has namespaces assigned to it
  335. * by default, and is available in an optimized state through all ports.
  336. */
  337. #define NVMET_MAX_ANAGRPS 128
  338. #define NVMET_DEFAULT_ANA_GRPID 1
  339. #define NVMET_KAS 10
  340. #define NVMET_DISC_KATO 120
  341. int __init nvmet_init_configfs(void);
  342. void __exit nvmet_exit_configfs(void);
  343. int __init nvmet_init_discovery(void);
  344. void nvmet_exit_discovery(void);
  345. extern struct nvmet_subsys *nvmet_disc_subsys;
  346. extern u64 nvmet_genctr;
  347. extern struct rw_semaphore nvmet_config_sem;
  348. extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
  349. extern u64 nvmet_ana_chgcnt;
  350. extern struct rw_semaphore nvmet_ana_sem;
  351. bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
  352. const char *hostnqn);
  353. int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
  354. int nvmet_file_ns_enable(struct nvmet_ns *ns);
  355. void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
  356. void nvmet_file_ns_disable(struct nvmet_ns *ns);
  357. u16 nvmet_bdev_flush(struct nvmet_req *req);
  358. u16 nvmet_file_flush(struct nvmet_req *req);
  359. void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
  360. static inline u32 nvmet_rw_len(struct nvmet_req *req)
  361. {
  362. return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
  363. req->ns->blksize_shift;
  364. }
  365. #endif /* _NVMET_H */