nbd.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Network block device - make block devices work over TCP
  4. *
  5. * Note that you can not swap over this thing, yet. Seems to work but
  6. * deadlocks sometimes - you can not swap over TCP in general.
  7. *
  8. * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
  9. * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
  10. *
  11. * (part of code stolen from loop.c)
  12. */
  13. #define pr_fmt(fmt) "nbd: " fmt
  14. #include <linux/major.h>
  15. #include <linux/blkdev.h>
  16. #include <linux/module.h>
  17. #include <linux/init.h>
  18. #include <linux/sched.h>
  19. #include <linux/sched/mm.h>
  20. #include <linux/fs.h>
  21. #include <linux/bio.h>
  22. #include <linux/stat.h>
  23. #include <linux/errno.h>
  24. #include <linux/file.h>
  25. #include <linux/ioctl.h>
  26. #include <linux/mutex.h>
  27. #include <linux/compiler.h>
  28. #include <linux/completion.h>
  29. #include <linux/err.h>
  30. #include <linux/kernel.h>
  31. #include <linux/slab.h>
  32. #include <net/sock.h>
  33. #include <linux/net.h>
  34. #include <linux/kthread.h>
  35. #include <linux/types.h>
  36. #include <linux/debugfs.h>
  37. #include <linux/blk-mq.h>
  38. #include <linux/uaccess.h>
  39. #include <asm/types.h>
  40. #include <linux/nbd.h>
  41. #include <linux/nbd-netlink.h>
  42. #include <net/genetlink.h>
  43. #define CREATE_TRACE_POINTS
  44. #include <trace/events/nbd.h>
  45. static DEFINE_IDR(nbd_index_idr);
  46. static DEFINE_MUTEX(nbd_index_mutex);
  47. static struct workqueue_struct *nbd_del_wq;
  48. static int nbd_total_devices = 0;
  49. struct nbd_sock {
  50. struct socket *sock;
  51. struct mutex tx_lock;
  52. struct request *pending;
  53. int sent;
  54. bool dead;
  55. int fallback_index;
  56. int cookie;
  57. };
  58. struct recv_thread_args {
  59. struct work_struct work;
  60. struct nbd_device *nbd;
  61. struct nbd_sock *nsock;
  62. int index;
  63. };
  64. struct link_dead_args {
  65. struct work_struct work;
  66. int index;
  67. };
  68. #define NBD_RT_TIMEDOUT 0
  69. #define NBD_RT_DISCONNECT_REQUESTED 1
  70. #define NBD_RT_DISCONNECTED 2
  71. #define NBD_RT_HAS_PID_FILE 3
  72. #define NBD_RT_HAS_CONFIG_REF 4
  73. #define NBD_RT_BOUND 5
  74. #define NBD_RT_DISCONNECT_ON_CLOSE 6
  75. #define NBD_RT_HAS_BACKEND_FILE 7
  76. #define NBD_DESTROY_ON_DISCONNECT 0
  77. #define NBD_DISCONNECT_REQUESTED 1
  78. struct nbd_config {
  79. u32 flags;
  80. unsigned long runtime_flags;
  81. u64 dead_conn_timeout;
  82. struct nbd_sock **socks;
  83. int num_connections;
  84. atomic_t live_connections;
  85. wait_queue_head_t conn_wait;
  86. atomic_t recv_threads;
  87. wait_queue_head_t recv_wq;
  88. unsigned int blksize_bits;
  89. loff_t bytesize;
  90. #if IS_ENABLED(CONFIG_DEBUG_FS)
  91. struct dentry *dbg_dir;
  92. #endif
  93. };
  94. static inline unsigned int nbd_blksize(struct nbd_config *config)
  95. {
  96. return 1u << config->blksize_bits;
  97. }
  98. struct nbd_device {
  99. struct blk_mq_tag_set tag_set;
  100. int index;
  101. refcount_t config_refs;
  102. refcount_t refs;
  103. struct nbd_config *config;
  104. struct mutex config_lock;
  105. struct gendisk *disk;
  106. struct workqueue_struct *recv_workq;
  107. struct work_struct remove_work;
  108. struct list_head list;
  109. struct task_struct *task_setup;
  110. unsigned long flags;
  111. pid_t pid; /* pid of nbd-client, if attached */
  112. char *backend;
  113. };
  114. #define NBD_CMD_REQUEUED 1
  115. /*
  116. * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
  117. * cleared in completion. Both setting and clearing of the flag are protected
  118. * by cmd->lock.
  119. */
  120. #define NBD_CMD_INFLIGHT 2
  121. struct nbd_cmd {
  122. struct nbd_device *nbd;
  123. struct mutex lock;
  124. int index;
  125. int cookie;
  126. int retries;
  127. blk_status_t status;
  128. unsigned long flags;
  129. u32 cmd_cookie;
  130. };
  131. #if IS_ENABLED(CONFIG_DEBUG_FS)
  132. static struct dentry *nbd_dbg_dir;
  133. #endif
  134. #define nbd_name(nbd) ((nbd)->disk->disk_name)
  135. #define NBD_DEF_BLKSIZE_BITS 10
  136. static unsigned int nbds_max = 16;
  137. static int max_part = 16;
  138. static int part_shift;
  139. static int nbd_dev_dbg_init(struct nbd_device *nbd);
  140. static void nbd_dev_dbg_close(struct nbd_device *nbd);
  141. static void nbd_config_put(struct nbd_device *nbd);
  142. static void nbd_connect_reply(struct genl_info *info, int index);
  143. static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
  144. static void nbd_dead_link_work(struct work_struct *work);
  145. static void nbd_disconnect_and_put(struct nbd_device *nbd);
  146. static inline struct device *nbd_to_dev(struct nbd_device *nbd)
  147. {
  148. return disk_to_dev(nbd->disk);
  149. }
  150. static void nbd_requeue_cmd(struct nbd_cmd *cmd)
  151. {
  152. struct request *req = blk_mq_rq_from_pdu(cmd);
  153. lockdep_assert_held(&cmd->lock);
  154. /*
  155. * Clear INFLIGHT flag so that this cmd won't be completed in
  156. * normal completion path
  157. *
  158. * INFLIGHT flag will be set when the cmd is queued to nbd next
  159. * time.
  160. */
  161. __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
  162. if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
  163. blk_mq_requeue_request(req, true);
  164. }
  165. #define NBD_COOKIE_BITS 32
  166. static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
  167. {
  168. struct request *req = blk_mq_rq_from_pdu(cmd);
  169. u32 tag = blk_mq_unique_tag(req);
  170. u64 cookie = cmd->cmd_cookie;
  171. return (cookie << NBD_COOKIE_BITS) | tag;
  172. }
  173. static u32 nbd_handle_to_tag(u64 handle)
  174. {
  175. return (u32)handle;
  176. }
  177. static u32 nbd_handle_to_cookie(u64 handle)
  178. {
  179. return (u32)(handle >> NBD_COOKIE_BITS);
  180. }
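/*
 * Illustrative note (not part of the driver): the 64-bit wire cookie packs
 * the per-command reuse counter into the upper 32 bits and the blk-mq
 * unique tag into the lower 32 bits. A minimal sketch of the round trip,
 * assuming a hypothetical tag of 0x20005 and cmd_cookie of 3:
 *
 *	handle = (3ULL << NBD_COOKIE_BITS) | 0x20005;	: 0x0000000300020005
 *	nbd_handle_to_tag(handle)			: 0x20005
 *	nbd_handle_to_cookie(handle)			: 3
 *
 * A stale reply that carries an old cookie therefore fails the cmd_cookie
 * check in nbd_handle_reply() even though the tag still resolves to a
 * live request.
 */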
  181. static const char *nbdcmd_to_ascii(int cmd)
  182. {
  183. switch (cmd) {
  184. case NBD_CMD_READ: return "read";
  185. case NBD_CMD_WRITE: return "write";
  186. case NBD_CMD_DISC: return "disconnect";
  187. case NBD_CMD_FLUSH: return "flush";
  188. case NBD_CMD_TRIM: return "trim/discard";
  189. }
  190. return "invalid";
  191. }
  192. static ssize_t pid_show(struct device *dev,
  193. struct device_attribute *attr, char *buf)
  194. {
  195. struct gendisk *disk = dev_to_disk(dev);
  196. struct nbd_device *nbd = disk->private_data;
  197. return sprintf(buf, "%d\n", nbd->pid);
  198. }
  199. static const struct device_attribute pid_attr = {
  200. .attr = { .name = "pid", .mode = 0444},
  201. .show = pid_show,
  202. };
  203. static ssize_t backend_show(struct device *dev,
  204. struct device_attribute *attr, char *buf)
  205. {
  206. struct gendisk *disk = dev_to_disk(dev);
  207. struct nbd_device *nbd = disk->private_data;
  208. return sprintf(buf, "%s\n", nbd->backend ?: "");
  209. }
  210. static const struct device_attribute backend_attr = {
  211. .attr = { .name = "backend", .mode = 0444},
  212. .show = backend_show,
  213. };
  214. static void nbd_dev_remove(struct nbd_device *nbd)
  215. {
  216. struct gendisk *disk = nbd->disk;
  217. del_gendisk(disk);
  218. blk_mq_free_tag_set(&nbd->tag_set);
  219. /*
  220. * Remove from idr after del_gendisk() completes, so if the same ID is
  221. * reused, the following add_disk() will succeed.
  222. */
  223. mutex_lock(&nbd_index_mutex);
  224. idr_remove(&nbd_index_idr, nbd->index);
  225. mutex_unlock(&nbd_index_mutex);
  226. destroy_workqueue(nbd->recv_workq);
  227. put_disk(disk);
  228. }
  229. static void nbd_dev_remove_work(struct work_struct *work)
  230. {
  231. nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
  232. }
  233. static void nbd_put(struct nbd_device *nbd)
  234. {
  235. if (!refcount_dec_and_test(&nbd->refs))
  236. return;
  237. /* Call del_gendisk() asynchronously to prevent deadlock */
  238. if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
  239. queue_work(nbd_del_wq, &nbd->remove_work);
  240. else
  241. nbd_dev_remove(nbd);
  242. }
  243. static int nbd_disconnected(struct nbd_config *config)
  244. {
  245. return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
  246. test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
  247. }
  248. static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
  249. int notify)
  250. {
  251. if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
  252. struct link_dead_args *args;
  253. args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
  254. if (args) {
  255. INIT_WORK(&args->work, nbd_dead_link_work);
  256. args->index = nbd->index;
  257. queue_work(system_wq, &args->work);
  258. }
  259. }
  260. if (!nsock->dead) {
  261. kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
  262. if (atomic_dec_return(&nbd->config->live_connections) == 0) {
  263. if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
  264. &nbd->config->runtime_flags)) {
  265. set_bit(NBD_RT_DISCONNECTED,
  266. &nbd->config->runtime_flags);
  267. dev_info(nbd_to_dev(nbd),
  268. "Disconnected due to user request.\n");
  269. }
  270. }
  271. }
  272. nsock->dead = true;
  273. nsock->pending = NULL;
  274. nsock->sent = 0;
  275. }
  276. static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
  277. loff_t blksize)
  278. {
  279. struct queue_limits lim;
  280. int error;
  281. if (!blksize)
  282. blksize = 1u << NBD_DEF_BLKSIZE_BITS;
  283. if (blk_validate_block_size(blksize))
  284. return -EINVAL;
  285. if (bytesize < 0)
  286. return -EINVAL;
  287. nbd->config->bytesize = bytesize;
  288. nbd->config->blksize_bits = __ffs(blksize);
  289. if (!nbd->pid)
  290. return 0;
  291. lim = queue_limits_start_update(nbd->disk->queue);
  292. if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
  293. lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT;
  294. else
  295. lim.max_hw_discard_sectors = 0;
  296. if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
  297. lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
  298. } else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
  299. lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
  300. } else {
  301. lim.features |= BLK_FEAT_WRITE_CACHE;
  302. lim.features &= ~BLK_FEAT_FUA;
  303. }
  304. if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
  305. lim.features |= BLK_FEAT_ROTATIONAL;
  306. if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
  307. lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT;
  308. lim.logical_block_size = blksize;
  309. lim.physical_block_size = blksize;
  310. error = queue_limits_commit_update(nbd->disk->queue, &lim);
  311. if (error)
  312. return error;
  313. if (max_part)
  314. set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
  315. if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
  316. kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
  317. return 0;
  318. }
  319. static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
  320. loff_t blksize)
  321. {
  322. int error;
  323. blk_mq_freeze_queue(nbd->disk->queue);
  324. error = __nbd_set_size(nbd, bytesize, blksize);
  325. blk_mq_unfreeze_queue(nbd->disk->queue);
  326. return error;
  327. }
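/*
 * Worked example (illustrative only): for a hypothetical 1 GiB export with
 * a 4096-byte block size, __nbd_set_size() stores
 * blksize_bits = __ffs(4096) = 12 and bytesize = 1073741824, and the
 * capacity reported to the block layer is bytesize >> 9 = 2097152
 * 512-byte sectors. Passing blksize == 0 falls back to the default
 * 1 << NBD_DEF_BLKSIZE_BITS = 1024-byte blocks.
 */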
  328. static void nbd_complete_rq(struct request *req)
  329. {
  330. struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
  331. dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
  332. cmd->status ? "failed" : "done");
  333. blk_mq_end_request(req, cmd->status);
  334. }
  335. /*
  336. * Forcibly shut down the sockets, causing all listeners to error out
  337. */
  338. static void sock_shutdown(struct nbd_device *nbd)
  339. {
  340. struct nbd_config *config = nbd->config;
  341. int i;
  342. if (config->num_connections == 0)
  343. return;
  344. if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
  345. return;
  346. for (i = 0; i < config->num_connections; i++) {
  347. struct nbd_sock *nsock = config->socks[i];
  348. mutex_lock(&nsock->tx_lock);
  349. nbd_mark_nsock_dead(nbd, nsock, 0);
  350. mutex_unlock(&nsock->tx_lock);
  351. }
  352. dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
  353. }
  354. static u32 req_to_nbd_cmd_type(struct request *req)
  355. {
  356. switch (req_op(req)) {
  357. case REQ_OP_DISCARD:
  358. return NBD_CMD_TRIM;
  359. case REQ_OP_FLUSH:
  360. return NBD_CMD_FLUSH;
  361. case REQ_OP_WRITE:
  362. return NBD_CMD_WRITE;
  363. case REQ_OP_READ:
  364. return NBD_CMD_READ;
  365. case REQ_OP_WRITE_ZEROES:
  366. return NBD_CMD_WRITE_ZEROES;
  367. default:
  368. return U32_MAX;
  369. }
  370. }
  371. static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
  372. {
  373. if (refcount_inc_not_zero(&nbd->config_refs)) {
  374. /*
  375. * Add smp_mb__after_atomic() to ensure that reading nbd->config_refs
  376. * and reading nbd->config are ordered. Its pair is the barrier in
  377. * nbd_alloc_and_init_config(), which prevents nbd->config_refs from
  378. * being seen as set before nbd->config is assigned.
  379. */
  380. smp_mb__after_atomic();
  381. return nbd->config;
  382. }
  383. return NULL;
  384. }
  385. static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
  386. {
  387. struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
  388. struct nbd_device *nbd = cmd->nbd;
  389. struct nbd_config *config;
  390. if (!mutex_trylock(&cmd->lock))
  391. return BLK_EH_RESET_TIMER;
  392. if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
  393. mutex_unlock(&cmd->lock);
  394. return BLK_EH_DONE;
  395. }
  396. config = nbd_get_config_unlocked(nbd);
  397. if (!config) {
  398. cmd->status = BLK_STS_TIMEOUT;
  399. __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
  400. mutex_unlock(&cmd->lock);
  401. goto done;
  402. }
  403. if (config->num_connections > 1 ||
  404. (config->num_connections == 1 && nbd->tag_set.timeout)) {
  405. dev_err_ratelimited(nbd_to_dev(nbd),
  406. "Connection timed out, retrying (%d/%d alive)\n",
  407. atomic_read(&config->live_connections),
  408. config->num_connections);
  409. /*
  410. * Hooray, we have more connections: requeue this IO and the submit
  411. * path will put it on a live connection. Or, if only one connection
  412. * is configured, the submit path will wait until a new connection is
  413. * reconfigured or until the dead connection timeout expires.
  414. */
  415. if (config->socks) {
  416. if (cmd->index < config->num_connections) {
  417. struct nbd_sock *nsock =
  418. config->socks[cmd->index];
  419. mutex_lock(&nsock->tx_lock);
  420. /* We can have multiple outstanding requests, so
  421. * we don't want to mark the nsock dead if we've
  422. * already reconnected with a new socket, so
  423. * only mark it dead if it's the same socket we
  424. * were sent out on.
  425. */
  426. if (cmd->cookie == nsock->cookie)
  427. nbd_mark_nsock_dead(nbd, nsock, 1);
  428. mutex_unlock(&nsock->tx_lock);
  429. }
  430. nbd_requeue_cmd(cmd);
  431. mutex_unlock(&cmd->lock);
  432. nbd_config_put(nbd);
  433. return BLK_EH_DONE;
  434. }
  435. }
  436. if (!nbd->tag_set.timeout) {
  437. /*
  438. * Userspace sets timeout=0 to disable socket disconnection,
  439. * so just warn and reset the timer.
  440. */
  441. struct nbd_sock *nsock = config->socks[cmd->index];
  442. cmd->retries++;
  443. dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
  444. req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
  445. (unsigned long long)blk_rq_pos(req) << 9,
  446. blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
  447. mutex_lock(&nsock->tx_lock);
  448. if (cmd->cookie != nsock->cookie) {
  449. nbd_requeue_cmd(cmd);
  450. mutex_unlock(&nsock->tx_lock);
  451. mutex_unlock(&cmd->lock);
  452. nbd_config_put(nbd);
  453. return BLK_EH_DONE;
  454. }
  455. mutex_unlock(&nsock->tx_lock);
  456. mutex_unlock(&cmd->lock);
  457. nbd_config_put(nbd);
  458. return BLK_EH_RESET_TIMER;
  459. }
  460. dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
  461. set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
  462. cmd->status = BLK_STS_IOERR;
  463. __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
  464. mutex_unlock(&cmd->lock);
  465. sock_shutdown(nbd);
  466. nbd_config_put(nbd);
  467. done:
  468. blk_mq_complete_request(req);
  469. return BLK_EH_DONE;
  470. }
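/*
 * Summary of the timeout handling above (descriptive note, not upstream):
 * with no config the request fails with BLK_STS_TIMEOUT; with more than
 * one connection (or a single connection plus a user-set timeout) the
 * socket is marked dead if it is still the one the command went out on
 * and the command is requeued; with timeout == 0 the possibly stuck
 * request is logged and the timer is reset unless the socket changed, in
 * which case the command is requeued; otherwise NBD_RT_TIMEDOUT is set,
 * the request fails with BLK_STS_IOERR and all sockets are shut down.
 */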
  471. static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
  472. struct iov_iter *iter, int msg_flags, int *sent)
  473. {
  474. int result;
  475. struct msghdr msg = {};
  476. unsigned int noreclaim_flag;
  477. if (unlikely(!sock)) {
  478. dev_err_ratelimited(disk_to_dev(nbd->disk),
  479. "Attempted %s on closed socket in sock_xmit\n",
  480. (send ? "send" : "recv"));
  481. return -EINVAL;
  482. }
  483. msg.msg_iter = *iter;
  484. noreclaim_flag = memalloc_noreclaim_save();
  485. do {
  486. sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
  487. sock->sk->sk_use_task_frag = false;
  488. msg.msg_flags = msg_flags | MSG_NOSIGNAL;
  489. if (send)
  490. result = sock_sendmsg(sock, &msg);
  491. else
  492. result = sock_recvmsg(sock, &msg, msg.msg_flags);
  493. if (result <= 0) {
  494. if (result == 0)
  495. result = -EPIPE; /* short read */
  496. break;
  497. }
  498. if (sent)
  499. *sent += result;
  500. } while (msg_data_left(&msg));
  501. memalloc_noreclaim_restore(noreclaim_flag);
  502. return result;
  503. }
  504. /*
  505. * Send or receive packet. Return a positive value on success and
  506. * negative value on failure, and never return 0.
  507. */
  508. static int sock_xmit(struct nbd_device *nbd, int index, int send,
  509. struct iov_iter *iter, int msg_flags, int *sent)
  510. {
  511. struct nbd_config *config = nbd->config;
  512. struct socket *sock = config->socks[index]->sock;
  513. return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
  514. }
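/*
 * Usage sketch (illustrative only, mirroring nbd_send_cmd() below): a
 * caller wraps its buffer in an iov_iter and hands it to sock_xmit(),
 * where the third argument selects send (1) or receive (0) and *sent
 * accumulates partial progress so a later retry can resume:
 *
 *	struct kvec iov = { .iov_base = &request, .iov_len = sizeof(request) };
 *	struct iov_iter from;
 *
 *	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
 *	result = sock_xmit(nbd, index, 1, &from, 0, &sent);
 *
 * The transfer runs under memalloc_noreclaim_save() with
 * GFP_NOIO | __GFP_MEMALLOC so socket allocations cannot recurse into
 * block-layer writeback through this very device.
 */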
  515. /*
  516. * Different settings for sk->sk_sndtimeo can result in different return values
  517. * if there is a signal pending when we enter sendmsg, because reasons?
  518. */
  519. static inline int was_interrupted(int result)
  520. {
  521. return result == -ERESTARTSYS || result == -EINTR;
  522. }
  523. /*
  524. * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
  525. * Returns BLK_STS_IOERR if sending failed.
  526. */
  527. static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
  528. int index)
  529. {
  530. struct request *req = blk_mq_rq_from_pdu(cmd);
  531. struct nbd_config *config = nbd->config;
  532. struct nbd_sock *nsock = config->socks[index];
  533. int result;
  534. struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
  535. struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
  536. struct iov_iter from;
  537. struct bio *bio;
  538. u64 handle;
  539. u32 type;
  540. u32 nbd_cmd_flags = 0;
  541. int sent = nsock->sent, skip = 0;
  542. lockdep_assert_held(&cmd->lock);
  543. lockdep_assert_held(&nsock->tx_lock);
  544. iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
  545. type = req_to_nbd_cmd_type(req);
  546. if (type == U32_MAX)
  547. return BLK_STS_IOERR;
  548. if (rq_data_dir(req) == WRITE &&
  549. (config->flags & NBD_FLAG_READ_ONLY)) {
  550. dev_err_ratelimited(disk_to_dev(nbd->disk),
  551. "Write on read-only\n");
  552. return BLK_STS_IOERR;
  553. }
  554. if (req->cmd_flags & REQ_FUA)
  555. nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
  556. if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
  557. nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE;
  558. /* We did a partial send previously, and we at least sent the whole
  559. * request struct, so just go and send the rest of the pages in the
  560. * request.
  561. */
  562. if (sent) {
  563. if (sent >= sizeof(request)) {
  564. skip = sent - sizeof(request);
  565. /* initialize handle for tracing purposes */
  566. handle = nbd_cmd_handle(cmd);
  567. goto send_pages;
  568. }
  569. iov_iter_advance(&from, sent);
  570. } else {
  571. cmd->cmd_cookie++;
  572. }
  573. cmd->index = index;
  574. cmd->cookie = nsock->cookie;
  575. cmd->retries = 0;
  576. request.type = htonl(type | nbd_cmd_flags);
  577. if (type != NBD_CMD_FLUSH) {
  578. request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
  579. request.len = htonl(blk_rq_bytes(req));
  580. }
  581. handle = nbd_cmd_handle(cmd);
  582. request.cookie = cpu_to_be64(handle);
  583. trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
  584. dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
  585. req, nbdcmd_to_ascii(type),
  586. (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
  587. result = sock_xmit(nbd, index, 1, &from,
  588. (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
  589. trace_nbd_header_sent(req, handle);
  590. if (result < 0) {
  591. if (was_interrupted(result)) {
  592. /* If we haven't sent anything we can just return BUSY,
  593. * however if we have sent something we need to make
  594. * sure we only allow this req to be sent until we are
  595. * completely done.
  596. */
  597. if (sent) {
  598. nsock->pending = req;
  599. nsock->sent = sent;
  600. }
  601. set_bit(NBD_CMD_REQUEUED, &cmd->flags);
  602. return BLK_STS_RESOURCE;
  603. }
  604. dev_err_ratelimited(disk_to_dev(nbd->disk),
  605. "Send control failed (result %d)\n", result);
  606. goto requeue;
  607. }
  608. send_pages:
  609. if (type != NBD_CMD_WRITE)
  610. goto out;
  611. bio = req->bio;
  612. while (bio) {
  613. struct bio *next = bio->bi_next;
  614. struct bvec_iter iter;
  615. struct bio_vec bvec;
  616. bio_for_each_segment(bvec, bio, iter) {
  617. bool is_last = !next && bio_iter_last(bvec, iter);
  618. int flags = is_last ? 0 : MSG_MORE;
  619. dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
  620. req, bvec.bv_len);
  621. iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
  622. if (skip) {
  623. if (skip >= iov_iter_count(&from)) {
  624. skip -= iov_iter_count(&from);
  625. continue;
  626. }
  627. iov_iter_advance(&from, skip);
  628. skip = 0;
  629. }
  630. result = sock_xmit(nbd, index, 1, &from, flags, &sent);
  631. if (result < 0) {
  632. if (was_interrupted(result)) {
  633. /* We've already sent the header, we
  634. * have no choice but to set pending and
  635. * return BUSY.
  636. */
  637. nsock->pending = req;
  638. nsock->sent = sent;
  639. set_bit(NBD_CMD_REQUEUED, &cmd->flags);
  640. return BLK_STS_RESOURCE;
  641. }
  642. dev_err(disk_to_dev(nbd->disk),
  643. "Send data failed (result %d)\n",
  644. result);
  645. goto requeue;
  646. }
  647. /*
  648. * The completion might already have come in,
  649. * so break for the last one instead of letting
  650. * the iterator do it. This prevents use-after-free
  651. * of the bio.
  652. */
  653. if (is_last)
  654. break;
  655. }
  656. bio = next;
  657. }
  658. out:
  659. trace_nbd_payload_sent(req, handle);
  660. nsock->pending = NULL;
  661. nsock->sent = 0;
  662. __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
  663. return BLK_STS_OK;
  664. requeue:
  665. /* retry on a different socket */
  666. dev_err_ratelimited(disk_to_dev(nbd->disk),
  667. "Request send failed, requeueing\n");
  668. nbd_mark_nsock_dead(nbd, nsock, 1);
  669. nbd_requeue_cmd(cmd);
  670. return BLK_STS_OK;
  671. }
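/*
 * On-the-wire view (descriptive note): the header sent above is a
 * struct nbd_request with every field in network byte order -- magic
 * (NBD_REQUEST_MAGIC), type (the command plus NBD_CMD_FLAG_* bits),
 * cookie (the 64-bit handle echoed back by the server), and, for
 * everything except NBD_CMD_FLUSH, from (byte offset, blk_rq_pos() << 9)
 * and len (blk_rq_bytes()). For NBD_CMD_WRITE the bio payload follows the
 * header on the same socket, which is why the header is sent with
 * MSG_MORE.
 */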
  672. static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
  673. struct nbd_reply *reply)
  674. {
  675. struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
  676. struct iov_iter to;
  677. int result;
  678. reply->magic = 0;
  679. iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
  680. result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
  681. if (result < 0) {
  682. if (!nbd_disconnected(nbd->config))
  683. dev_err(disk_to_dev(nbd->disk),
  684. "Receive control failed (result %d)\n", result);
  685. return result;
  686. }
  687. if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
  688. dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
  689. (unsigned long)ntohl(reply->magic));
  690. return -EPROTO;
  691. }
  692. return 0;
  693. }
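/*
 * Descriptive note: the fixed-size struct nbd_reply read here carries, in
 * network byte order, a magic value (NBD_REPLY_MAGIC), an error code and
 * the 64-bit cookie that was sent in the matching request; any read
 * payload follows on the same socket and is consumed later in
 * nbd_handle_reply().
 */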
  694. /* An ERR_PTR is returned if something went wrong; inform userspace */
  695. static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
  696. struct nbd_reply *reply)
  697. {
  698. int result;
  699. struct nbd_cmd *cmd;
  700. struct request *req = NULL;
  701. u64 handle;
  702. u16 hwq;
  703. u32 tag;
  704. int ret = 0;
  705. handle = be64_to_cpu(reply->cookie);
  706. tag = nbd_handle_to_tag(handle);
  707. hwq = blk_mq_unique_tag_to_hwq(tag);
  708. if (hwq < nbd->tag_set.nr_hw_queues)
  709. req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
  710. blk_mq_unique_tag_to_tag(tag));
  711. if (!req || !blk_mq_request_started(req)) {
  712. dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
  713. tag, req);
  714. return ERR_PTR(-ENOENT);
  715. }
  716. trace_nbd_header_received(req, handle);
  717. cmd = blk_mq_rq_to_pdu(req);
  718. mutex_lock(&cmd->lock);
  719. if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
  720. dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
  721. tag, cmd->status, cmd->flags);
  722. ret = -ENOENT;
  723. goto out;
  724. }
  725. if (cmd->index != index) {
  726. dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
  727. tag, index, cmd->index);
  728. ret = -ENOENT;
  729. goto out;
  730. }
  731. if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
  732. dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
  733. req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
  734. ret = -ENOENT;
  735. goto out;
  736. }
  737. if (cmd->status != BLK_STS_OK) {
  738. dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
  739. req);
  740. ret = -ENOENT;
  741. goto out;
  742. }
  743. if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
  744. dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
  745. req);
  746. ret = -ENOENT;
  747. goto out;
  748. }
  749. if (ntohl(reply->error)) {
  750. dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
  751. ntohl(reply->error));
  752. cmd->status = BLK_STS_IOERR;
  753. goto out;
  754. }
  755. dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
  756. if (rq_data_dir(req) != WRITE) {
  757. struct req_iterator iter;
  758. struct bio_vec bvec;
  759. struct iov_iter to;
  760. rq_for_each_segment(bvec, req, iter) {
  761. iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
  762. result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
  763. if (result < 0) {
  764. dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
  765. result);
  766. /*
  767. * If we've disconnected, we need to make sure we
  768. * complete this request, otherwise error out
  769. * and let the timeout stuff handle resubmitting
  770. * this request onto another connection.
  771. */
  772. if (nbd_disconnected(nbd->config)) {
  773. cmd->status = BLK_STS_IOERR;
  774. goto out;
  775. }
  776. ret = -EIO;
  777. goto out;
  778. }
  779. dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
  780. req, bvec.bv_len);
  781. }
  782. }
  783. out:
  784. trace_nbd_payload_received(req, handle);
  785. mutex_unlock(&cmd->lock);
  786. return ret ? ERR_PTR(ret) : cmd;
  787. }
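/*
 * Descriptive note: a reply is only accepted above if the tag resolves to
 * a started request, NBD_CMD_INFLIGHT is still set, the reply arrived on
 * the same socket index the command was sent on, the packed cmd_cookie
 * matches (i.e. the handle is not stale), the command has not already
 * been failed, and it has not raced with a timeout-driven requeue.
 * Anything else is treated as a protocol error and tears the receive
 * path down.
 */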
  788. static void recv_work(struct work_struct *work)
  789. {
  790. struct recv_thread_args *args = container_of(work,
  791. struct recv_thread_args,
  792. work);
  793. struct nbd_device *nbd = args->nbd;
  794. struct nbd_config *config = nbd->config;
  795. struct request_queue *q = nbd->disk->queue;
  796. struct nbd_sock *nsock = args->nsock;
  797. struct nbd_cmd *cmd;
  798. struct request *rq;
  799. while (1) {
  800. struct nbd_reply reply;
  801. if (nbd_read_reply(nbd, nsock->sock, &reply))
  802. break;
  803. /*
  804. * Grab .q_usage_counter so request pool won't go away, then no
  805. * request use-after-free is possible during nbd_handle_reply().
  806. * If the queue is frozen, there won't be any inflight requests, so
  807. * there is no need to handle the incoming garbage message.
  808. */
  809. if (!percpu_ref_tryget(&q->q_usage_counter)) {
  810. dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
  811. __func__);
  812. break;
  813. }
  814. cmd = nbd_handle_reply(nbd, args->index, &reply);
  815. if (IS_ERR(cmd)) {
  816. percpu_ref_put(&q->q_usage_counter);
  817. break;
  818. }
  819. rq = blk_mq_rq_from_pdu(cmd);
  820. if (likely(!blk_should_fake_timeout(rq->q))) {
  821. bool complete;
  822. mutex_lock(&cmd->lock);
  823. complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
  824. &cmd->flags);
  825. mutex_unlock(&cmd->lock);
  826. if (complete)
  827. blk_mq_complete_request(rq);
  828. }
  829. percpu_ref_put(&q->q_usage_counter);
  830. }
  831. mutex_lock(&nsock->tx_lock);
  832. nbd_mark_nsock_dead(nbd, nsock, 1);
  833. mutex_unlock(&nsock->tx_lock);
  834. nbd_config_put(nbd);
  835. atomic_dec(&config->recv_threads);
  836. wake_up(&config->recv_wq);
  837. kfree(args);
  838. }
  839. static bool nbd_clear_req(struct request *req, void *data)
  840. {
  841. struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
  842. /* don't abort one completed request */
  843. if (blk_mq_request_completed(req))
  844. return true;
  845. mutex_lock(&cmd->lock);
  846. if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
  847. mutex_unlock(&cmd->lock);
  848. return true;
  849. }
  850. cmd->status = BLK_STS_IOERR;
  851. mutex_unlock(&cmd->lock);
  852. blk_mq_complete_request(req);
  853. return true;
  854. }
  855. static void nbd_clear_que(struct nbd_device *nbd)
  856. {
  857. blk_mq_quiesce_queue(nbd->disk->queue);
  858. blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
  859. blk_mq_unquiesce_queue(nbd->disk->queue);
  860. dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
  861. }
  862. static int find_fallback(struct nbd_device *nbd, int index)
  863. {
  864. struct nbd_config *config = nbd->config;
  865. int new_index = -1;
  866. struct nbd_sock *nsock = config->socks[index];
  867. int fallback = nsock->fallback_index;
  868. if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
  869. return new_index;
  870. if (config->num_connections <= 1) {
  871. dev_err_ratelimited(disk_to_dev(nbd->disk),
  872. "Dead connection, failed to find a fallback\n");
  873. return new_index;
  874. }
  875. if (fallback >= 0 && fallback < config->num_connections &&
  876. !config->socks[fallback]->dead)
  877. return fallback;
  878. if (nsock->fallback_index < 0 ||
  879. nsock->fallback_index >= config->num_connections ||
  880. config->socks[nsock->fallback_index]->dead) {
  881. int i;
  882. for (i = 0; i < config->num_connections; i++) {
  883. if (i == index)
  884. continue;
  885. if (!config->socks[i]->dead) {
  886. new_index = i;
  887. break;
  888. }
  889. }
  890. nsock->fallback_index = new_index;
  891. if (new_index < 0) {
  892. dev_err_ratelimited(disk_to_dev(nbd->disk),
  893. "Dead connection, failed to find a fallback\n");
  894. return new_index;
  895. }
  896. }
  897. new_index = nsock->fallback_index;
  898. return new_index;
  899. }
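/*
 * Descriptive note: find_fallback() prefers the cached fallback_index if
 * that socket is still alive, otherwise it scans for any other live
 * socket, caches it, and returns -1 when none exists or the device has
 * already been marked disconnected.
 */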
  900. static int wait_for_reconnect(struct nbd_device *nbd)
  901. {
  902. struct nbd_config *config = nbd->config;
  903. if (!config->dead_conn_timeout)
  904. return 0;
  905. if (!wait_event_timeout(config->conn_wait,
  906. test_bit(NBD_RT_DISCONNECTED,
  907. &config->runtime_flags) ||
  908. atomic_read(&config->live_connections) > 0,
  909. config->dead_conn_timeout))
  910. return 0;
  911. return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
  912. }
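/*
 * Descriptive note: wait_for_reconnect() returns non-zero only when a
 * live connection reappeared within dead_conn_timeout and the device was
 * not disconnected; it returns 0 if the timeout is unset, if it expired,
 * or if the device was disconnected while waiting, in which case the
 * caller fails the command instead of retrying.
 */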
  913. static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
  914. {
  915. struct request *req = blk_mq_rq_from_pdu(cmd);
  916. struct nbd_device *nbd = cmd->nbd;
  917. struct nbd_config *config;
  918. struct nbd_sock *nsock;
  919. blk_status_t ret;
  920. lockdep_assert_held(&cmd->lock);
  921. config = nbd_get_config_unlocked(nbd);
  922. if (!config) {
  923. dev_err_ratelimited(disk_to_dev(nbd->disk),
  924. "Socks array is empty\n");
  925. return BLK_STS_IOERR;
  926. }
  927. if (index >= config->num_connections) {
  928. dev_err_ratelimited(disk_to_dev(nbd->disk),
  929. "Attempted send on invalid socket\n");
  930. nbd_config_put(nbd);
  931. return BLK_STS_IOERR;
  932. }
  933. cmd->status = BLK_STS_OK;
  934. again:
  935. nsock = config->socks[index];
  936. mutex_lock(&nsock->tx_lock);
  937. if (nsock->dead) {
  938. int old_index = index;
  939. index = find_fallback(nbd, index);
  940. mutex_unlock(&nsock->tx_lock);
  941. if (index < 0) {
  942. if (wait_for_reconnect(nbd)) {
  943. index = old_index;
  944. goto again;
  945. }
  946. /* All the sockets should already be down at this point,
  947. * we just want to make sure that DISCONNECTED is set so
  948. * any requests that come in that were queued waiting
  949. * for the reconnect timer don't trigger the timer again
  950. * and instead just error out.
  951. */
  952. sock_shutdown(nbd);
  953. nbd_config_put(nbd);
  954. return BLK_STS_IOERR;
  955. }
  956. goto again;
  957. }
  958. /* Handle the case that we have a pending request that was partially
  959. * transmitted that _has_ to be serviced first. We need to call requeue
  960. * here so that it gets put _after_ the request that is already on the
  961. * dispatch list.
  962. */
  963. blk_mq_start_request(req);
  964. if (unlikely(nsock->pending && nsock->pending != req)) {
  965. nbd_requeue_cmd(cmd);
  966. ret = BLK_STS_OK;
  967. goto out;
  968. }
  969. ret = nbd_send_cmd(nbd, cmd, index);
  970. out:
  971. mutex_unlock(&nsock->tx_lock);
  972. nbd_config_put(nbd);
  973. return ret;
  974. }
  975. static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
  976. const struct blk_mq_queue_data *bd)
  977. {
  978. struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
  979. blk_status_t ret;
  980. /*
  981. * Since we look at the bio's to send the request over the network we
  982. * need to make sure the completion work doesn't mark this request done
  983. * before we are done doing our send. This keeps us from dereferencing
  984. * freed data if we have particularly fast completions (ie we get the
  985. * completion before we exit sock_xmit on the last bvec) or in the case
  986. * that the server is misbehaving (or there was an error) before we're
  987. * done sending everything over the wire.
  988. */
  989. mutex_lock(&cmd->lock);
  990. clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
  991. /* We can be called directly from the user space process, which means we
  992. * could possibly have signals pending so our sendmsg will fail. In
  993. * this case we need to return that we are busy, otherwise error out as
  994. * appropriate.
  995. */
  996. ret = nbd_handle_cmd(cmd, hctx->queue_num);
  997. mutex_unlock(&cmd->lock);
  998. return ret;
  999. }
  1000. static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
  1001. int *err)
  1002. {
  1003. struct socket *sock;
  1004. *err = 0;
  1005. sock = sockfd_lookup(fd, err);
  1006. if (!sock)
  1007. return NULL;
  1008. if (sock->ops->shutdown == sock_no_shutdown) {
  1009. dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
  1010. *err = -EINVAL;
  1011. sockfd_put(sock);
  1012. return NULL;
  1013. }
  1014. return sock;
  1015. }
  1016. static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
  1017. bool netlink)
  1018. {
  1019. struct nbd_config *config = nbd->config;
  1020. struct socket *sock;
  1021. struct nbd_sock **socks;
  1022. struct nbd_sock *nsock;
  1023. int err;
  1024. /* Arg will be cast to int, check it to avoid overflow */
  1025. if (arg > INT_MAX)
  1026. return -EINVAL;
  1027. sock = nbd_get_socket(nbd, arg, &err);
  1028. if (!sock)
  1029. return err;
  1030. /*
  1031. * We need to make sure we don't get any errant requests while we're
  1032. * reallocating the ->socks array.
  1033. */
  1034. blk_mq_freeze_queue(nbd->disk->queue);
  1035. if (!netlink && !nbd->task_setup &&
  1036. !test_bit(NBD_RT_BOUND, &config->runtime_flags))
  1037. nbd->task_setup = current;
  1038. if (!netlink &&
  1039. (nbd->task_setup != current ||
  1040. test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
  1041. dev_err(disk_to_dev(nbd->disk),
  1042. "Device being setup by another task");
  1043. err = -EBUSY;
  1044. goto put_socket;
  1045. }
  1046. nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
  1047. if (!nsock) {
  1048. err = -ENOMEM;
  1049. goto put_socket;
  1050. }
  1051. socks = krealloc(config->socks, (config->num_connections + 1) *
  1052. sizeof(struct nbd_sock *), GFP_KERNEL);
  1053. if (!socks) {
  1054. kfree(nsock);
  1055. err = -ENOMEM;
  1056. goto put_socket;
  1057. }
  1058. config->socks = socks;
  1059. nsock->fallback_index = -1;
  1060. nsock->dead = false;
  1061. mutex_init(&nsock->tx_lock);
  1062. nsock->sock = sock;
  1063. nsock->pending = NULL;
  1064. nsock->sent = 0;
  1065. nsock->cookie = 0;
  1066. socks[config->num_connections++] = nsock;
  1067. atomic_inc(&config->live_connections);
  1068. blk_mq_unfreeze_queue(nbd->disk->queue);
  1069. return 0;
  1070. put_socket:
  1071. blk_mq_unfreeze_queue(nbd->disk->queue);
  1072. sockfd_put(sock);
  1073. return err;
  1074. }
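/*
 * Descriptive note: nbd_add_socket() freezes the queue while the
 * config->socks array is reallocated so no request can observe a
 * half-grown array, and the task_setup/NBD_RT_BOUND checks keep the
 * legacy ioctl path and the netlink path from configuring the same
 * device at the same time.
 */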
  1075. static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
  1076. {
  1077. struct nbd_config *config = nbd->config;
  1078. struct socket *sock, *old;
  1079. struct recv_thread_args *args;
  1080. int i;
  1081. int err;
  1082. sock = nbd_get_socket(nbd, arg, &err);
  1083. if (!sock)
  1084. return err;
  1085. args = kzalloc(sizeof(*args), GFP_KERNEL);
  1086. if (!args) {
  1087. sockfd_put(sock);
  1088. return -ENOMEM;
  1089. }
  1090. for (i = 0; i < config->num_connections; i++) {
  1091. struct nbd_sock *nsock = config->socks[i];
  1092. if (!nsock->dead)
  1093. continue;
  1094. mutex_lock(&nsock->tx_lock);
  1095. if (!nsock->dead) {
  1096. mutex_unlock(&nsock->tx_lock);
  1097. continue;
  1098. }
  1099. sk_set_memalloc(sock->sk);
  1100. if (nbd->tag_set.timeout)
  1101. sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
  1102. atomic_inc(&config->recv_threads);
  1103. refcount_inc(&nbd->config_refs);
  1104. old = nsock->sock;
  1105. nsock->fallback_index = -1;
  1106. nsock->sock = sock;
  1107. nsock->dead = false;
  1108. INIT_WORK(&args->work, recv_work);
  1109. args->index = i;
  1110. args->nbd = nbd;
  1111. args->nsock = nsock;
  1112. nsock->cookie++;
  1113. mutex_unlock(&nsock->tx_lock);
  1114. sockfd_put(old);
  1115. clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
  1116. /* We take the tx_lock in an error path in recv_work, so we
  1117. * need to queue_work outside of the tx_lock.
  1118. */
  1119. queue_work(nbd->recv_workq, &args->work);
  1120. atomic_inc(&config->live_connections);
  1121. wake_up(&config->conn_wait);
  1122. return 0;
  1123. }
  1124. sockfd_put(sock);
  1125. kfree(args);
  1126. return -ENOSPC;
  1127. }
  1128. static void nbd_bdev_reset(struct nbd_device *nbd)
  1129. {
  1130. if (disk_openers(nbd->disk) > 1)
  1131. return;
  1132. set_capacity(nbd->disk, 0);
  1133. }
  1134. static void nbd_parse_flags(struct nbd_device *nbd)
  1135. {
  1136. if (nbd->config->flags & NBD_FLAG_READ_ONLY)
  1137. set_disk_ro(nbd->disk, true);
  1138. else
  1139. set_disk_ro(nbd->disk, false);
  1140. }
  1141. static void send_disconnects(struct nbd_device *nbd)
  1142. {
  1143. struct nbd_config *config = nbd->config;
  1144. struct nbd_request request = {
  1145. .magic = htonl(NBD_REQUEST_MAGIC),
  1146. .type = htonl(NBD_CMD_DISC),
  1147. };
  1148. struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
  1149. struct iov_iter from;
  1150. int i, ret;
  1151. for (i = 0; i < config->num_connections; i++) {
  1152. struct nbd_sock *nsock = config->socks[i];
  1153. iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
  1154. mutex_lock(&nsock->tx_lock);
  1155. ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
  1156. if (ret < 0)
  1157. dev_err(disk_to_dev(nbd->disk),
  1158. "Send disconnect failed %d\n", ret);
  1159. mutex_unlock(&nsock->tx_lock);
  1160. }
  1161. }
  1162. static int nbd_disconnect(struct nbd_device *nbd)
  1163. {
  1164. struct nbd_config *config = nbd->config;
  1165. dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
  1166. set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
  1167. set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
  1168. send_disconnects(nbd);
  1169. return 0;
  1170. }
  1171. static void nbd_clear_sock(struct nbd_device *nbd)
  1172. {
  1173. sock_shutdown(nbd);
  1174. nbd_clear_que(nbd);
  1175. nbd->task_setup = NULL;
  1176. }
  1177. static void nbd_config_put(struct nbd_device *nbd)
  1178. {
  1179. if (refcount_dec_and_mutex_lock(&nbd->config_refs,
  1180. &nbd->config_lock)) {
  1181. struct nbd_config *config = nbd->config;
  1182. nbd_dev_dbg_close(nbd);
  1183. invalidate_disk(nbd->disk);
  1184. if (nbd->config->bytesize)
  1185. kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
  1186. if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
  1187. &config->runtime_flags))
  1188. device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
  1189. nbd->pid = 0;
  1190. if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
  1191. &config->runtime_flags)) {
  1192. device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
  1193. kfree(nbd->backend);
  1194. nbd->backend = NULL;
  1195. }
  1196. nbd_clear_sock(nbd);
  1197. if (config->num_connections) {
  1198. int i;
  1199. for (i = 0; i < config->num_connections; i++) {
  1200. sockfd_put(config->socks[i]->sock);
  1201. kfree(config->socks[i]);
  1202. }
  1203. kfree(config->socks);
  1204. }
  1205. kfree(nbd->config);
  1206. nbd->config = NULL;
  1207. nbd->tag_set.timeout = 0;
  1208. mutex_unlock(&nbd->config_lock);
  1209. nbd_put(nbd);
  1210. module_put(THIS_MODULE);
  1211. }
  1212. }
  1213. static int nbd_start_device(struct nbd_device *nbd)
  1214. {
  1215. struct nbd_config *config = nbd->config;
  1216. int num_connections = config->num_connections;
  1217. int error = 0, i;
  1218. if (nbd->pid)
  1219. return -EBUSY;
  1220. if (!config->socks)
  1221. return -EINVAL;
  1222. if (num_connections > 1 &&
  1223. !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
  1224. dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
  1225. return -EINVAL;
  1226. }
  1227. blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
  1228. nbd->pid = task_pid_nr(current);
  1229. nbd_parse_flags(nbd);
  1230. error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
  1231. if (error) {
  1232. dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
  1233. return error;
  1234. }
  1235. set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
  1236. nbd_dev_dbg_init(nbd);
  1237. for (i = 0; i < num_connections; i++) {
  1238. struct recv_thread_args *args;
  1239. args = kzalloc(sizeof(*args), GFP_KERNEL);
  1240. if (!args) {
  1241. sock_shutdown(nbd);
  1242. /*
  1243. * If num_connections is m (m > 2) and the first n kzalloc()
  1244. * calls (1 < n < m) succeeded but allocation n + 1 failed, we
  1245. * still have n recv threads running. So add flush_workqueue()
  1246. * here to prevent those recv threads from dropping the last
  1247. * config_refs and trying to destroy the workqueue from inside
  1248. * the workqueue.
  1249. */
  1250. if (i)
  1251. flush_workqueue(nbd->recv_workq);
  1252. return -ENOMEM;
  1253. }
  1254. sk_set_memalloc(config->socks[i]->sock->sk);
  1255. if (nbd->tag_set.timeout)
  1256. config->socks[i]->sock->sk->sk_sndtimeo =
  1257. nbd->tag_set.timeout;
  1258. atomic_inc(&config->recv_threads);
  1259. refcount_inc(&nbd->config_refs);
  1260. INIT_WORK(&args->work, recv_work);
  1261. args->nbd = nbd;
  1262. args->nsock = config->socks[i];
  1263. args->index = i;
  1264. queue_work(nbd->recv_workq, &args->work);
  1265. }
  1266. return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
  1267. }
  1268. static int nbd_start_device_ioctl(struct nbd_device *nbd)
  1269. {
  1270. struct nbd_config *config = nbd->config;
  1271. int ret;
  1272. ret = nbd_start_device(nbd);
  1273. if (ret)
  1274. return ret;
  1275. if (max_part)
  1276. set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
  1277. mutex_unlock(&nbd->config_lock);
  1278. ret = wait_event_interruptible(config->recv_wq,
  1279. atomic_read(&config->recv_threads) == 0);
  1280. if (ret) {
  1281. sock_shutdown(nbd);
  1282. nbd_clear_que(nbd);
  1283. }
  1284. flush_workqueue(nbd->recv_workq);
  1285. mutex_lock(&nbd->config_lock);
  1286. nbd_bdev_reset(nbd);
  1287. /* user requested, ignore socket errors */
  1288. if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
  1289. ret = 0;
  1290. if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
  1291. ret = -ETIMEDOUT;
  1292. return ret;
  1293. }
  1294. static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
  1295. {
  1296. nbd_clear_sock(nbd);
  1297. disk_force_media_change(nbd->disk);
  1298. nbd_bdev_reset(nbd);
  1299. if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
  1300. &nbd->config->runtime_flags))
  1301. nbd_config_put(nbd);
  1302. }
  1303. static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
  1304. {
  1305. nbd->tag_set.timeout = timeout * HZ;
  1306. if (timeout)
  1307. blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
  1308. else
  1309. blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
  1310. }
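/*
 * Descriptive note: the timeout argument is in seconds; zero keeps the
 * per-command timeout disabled in tag_set.timeout but leaves the block
 * layer's request timer at the 30 second default, so nbd_xmit_timeout()
 * still runs and can requeue or log stuck requests.
 */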
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	loff_t bytesize;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		return nbd_set_size(nbd, config->bytesize, arg);
	case NBD_SET_SIZE:
		return nbd_set_size(nbd, arg, nbd_blksize(config));
	case NBD_SET_SIZE_BLOCKS:
		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
			return -EINVAL;
		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;
	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't, so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static int nbd_alloc_and_init_config(struct nbd_device *nbd)
{
	struct nbd_config *config;

	if (WARN_ON(nbd->config))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
	atomic_set(&config->live_connections, 0);

	nbd->config = config;
	/*
	 * Order the nbd->config assignment against
	 * refcount_set(&nbd->config_refs, 1); its pair is the barrier in
	 * nbd_get_config_unlocked(), so nbd_get_config_unlocked() won't see
	 * nbd->config as NULL after refcount_inc_not_zero() succeeds.
	 */
	smp_mb__before_atomic();
	refcount_set(&nbd->config_refs, 1);

	return 0;
}
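
/*
 * open() of the block node: take a device reference and, if no config is
 * live yet, allocate a fresh one, so simply opening /dev/nbdX is enough to
 * begin configuring the device.
 */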
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		ret = nbd_alloc_and_init_config(nbd);
		if (ret) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}

		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	} else if (nbd_disconnected(config)) {
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk)
{
	struct nbd_device *nbd = disk->private_data;

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
	    disk_openers(disk) == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static void nbd_free_disk(struct gendisk *disk)
{
	struct nbd_device *nbd = disk->private_data;

	kfree(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner = THIS_MODULE,
	.open = nbd_open,
	.release = nbd_release,
	.ioctl = nbd_ioctl,
	.compat_ioctl = nbd_ioctl,
	.free_disk = nbd_free_disk,
};
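
/*
 * debugfs layout, when CONFIG_DEBUG_FS is enabled (debugfs is typically
 * mounted at /sys/kernel/debug):
 *
 *	nbd/nbdX/tasks		pid of the recv task
 *	nbd/nbdX/size_bytes	current device size
 *	nbd/nbdX/timeout	request timeout in jiffies
 *	nbd/nbdX/blocksize_bits	block size as a power of two
 *	nbd/nbdX/flags		decoded server flags
 */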
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->pid)
		seq_printf(s, "recv: %d\n", nbd->pid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
	if (flags & NBD_FLAG_SEND_WRITE_ZEROES)
		seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n");
	if (flags & NBD_FLAG_ROTATIONAL)
		seq_puts(s, "NBD_FLAG_ROTATIONAL\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (IS_ERR(dir)) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (IS_ERR(dbg_dir))
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
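
/*
 * blk-mq glue: every request gets a struct nbd_cmd as its driver pdu, set up
 * once here when the tag set is allocated.
 */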
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq = nbd_queue_rq,
	.complete = nbd_complete_rq,
	.init_request = nbd_init_request,
	.timeout = nbd_xmit_timeout,
};
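
/*
 * Allocate and register one nbd device: a single-hw-queue, blocking blk-mq
 * queue with a depth of 128, the per-device recv workqueue, and the
 * corresponding gendisk nbd<index>.  The device is only published (nbd->refs
 * set non-zero) once add_disk() has succeeded.
 */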
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
	struct queue_limits lim = {
		.max_hw_sectors = 65536,
		.io_opt = 256 << SECTOR_SHIFT,
		.max_segments = USHRT_MAX,
		.max_segment_size = UINT_MAX,
	};
	struct nbd_device *nbd;
	struct gendisk *disk;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
	nbd->backend = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	mutex_lock(&nbd_index_mutex);
	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0,
				(MINORMASK >> part_shift) + 1, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	nbd->index = index;
	mutex_unlock(&nbd_index_mutex);
	if (err < 0)
		goto out_free_tags;

	disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;

	nbd->recv_workq = alloc_workqueue("nbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		err = -ENOMEM;
		goto out_err_disk;
	}

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	/*
	 * Start out with zero references to keep other threads from using
	 * this device until it is fully initialized.
	 */
	refcount_set(&nbd->refs, 0);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->minors = 1 << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	err = add_disk(disk);
	if (err)
		goto out_free_work;

	/*
	 * Now publish the device.
	 */
	refcount_set(&nbd->refs, refs);
	nbd_total_devices++;
	return nbd;

out_free_work:
	destroy_workqueue(nbd->recv_workq);
out_err_disk:
	put_disk(disk);
out_free_idr:
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
out:
	return ERR_PTR(err);
}

static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX] = { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT] = { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS] = { .type = NLA_NESTED },
	[NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED },
	[NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING },
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD] = { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX] = { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED] = { .type = NLA_U8 },
};

static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}
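
/*
 * NBD_CMD_CONNECT handler.  A minimal connect message, sketched in netlink
 * attribute form (illustrative; values are chosen by the userspace client):
 *
 *	NBD_ATTR_INDEX		(optional, otherwise the first free device)
 *	NBD_ATTR_SIZE_BYTES	(required)
 *	NBD_ATTR_SOCKETS	(required, nested)
 *		NBD_SOCK_ITEM
 *			NBD_SOCK_FD
 *	NBD_ATTR_BLOCK_SIZE_BYTES, NBD_ATTR_TIMEOUT, ... (optional)
 */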
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX]) {
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

		/*
		 * Too large a first_minor can cause duplicate creation of
		 * sysfs files/links, since index << part_shift might overflow,
		 * and MKDEV() expects first_minor to fit in 20 bits.
		 */
		if (index < 0 || index > MINORMASK >> part_shift) {
			pr_err("illegal input index %d\n", index);
			return -EINVAL;
		}
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
		pr_err("must specify at least one socket\n");
		return -EINVAL;
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
		pr_err("must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("device at index %d is going down\n",
				       index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		pr_err("nbd%d already in use\n", index);
		return -EBUSY;
	}

	ret = nbd_alloc_and_init_config(nbd);
	if (ret) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		pr_err("couldn't allocate config\n");
		return ret;
	}

	config = nbd->config;
	set_bit(NBD_RT_BOUND, &config->runtime_flags);
	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config.  If we already have
			 * DESTROY_ON_DISCONNECT set then we know we don't have
			 * that extra ref already held so we don't need the
			 * put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
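
/*
 * Tear down a live connection: shut the sockets down, wait for the recv
 * workers to drain, clear the queue and drop the runtime config reference.
 * Used both by NBD_CMD_DISCONNECT and by the disconnect-on-close path in
 * nbd_release().
 */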
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure the recv threads have finished so that we can safely call
	 * nbd_clear_que() to cancel the in-flight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find device at index %d\n", index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
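
/*
 * NBD_CMD_RECONFIGURE handler: change timeouts/flags or hand new socket fds
 * to an already running device (e.g. to replace a dead link).  If the device
 * carries a backend identifier, the caller must supply a matching
 * NBD_ATTR_BACKEND_IDENTIFIER before anything is touched.
 */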
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find a device at index %d\n", index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match with %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);

		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd = NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_connect,
	},
	{
		.cmd = NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_disconnect,
	},
	{
		.cmd = NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_reconfigure,
	},
	{
		.cmd = NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = NBD_GENL_FAMILY_NAME,
	.version = NBD_GENL_VERSION,
	.module = THIS_MODULE,
	.small_ops = nbd_connect_genl_ops,
	.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
	.resv_start_op = NBD_CMD_STATUS + 1,
	.maxattr = NBD_ATTR_MAX,
	.netnsok = 1,
	.policy = nbd_attr_policy,
	.mcgrps = nbd_mcast_grps,
	.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};

MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);
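
/*
 * NBD_CMD_STATUS reply layout: an NBD_ATTR_DEVICE_LIST attribute containing
 * one NBD_DEVICE_ITEM per device, each carrying NBD_DEVICE_INDEX and
 * NBD_DEVICE_CONNECTED.
 */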
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  The reason we don't
	 * take a ref here is because we can't take a ref in the index == -1
	 * case as we would need to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves once we're
	 * disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (!dev_list) {
		nlmsg_free(reply);
		ret = -EMSGSIZE;
		goto out;
	}

	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}
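
/*
 * Dead-link notification: nbd_dead_link_work() is queued when a connection
 * is marked dead and multicasts an NBD_CMD_LINK_DEAD message carrying the
 * device index, so userspace can hand in a replacement socket via
 * NBD_CMD_RECONFIGURE.
 */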
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
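
/*
 * Module init: register the block major, create the deletion workqueue and
 * the netlink family, then pre-create nbds_max devices (each published with
 * a single reference).
 */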
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		pr_err("max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the max number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip any nbd that is being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	/*
	 * Unregister the netlink interface prior to waiting
	 * for the completion of netlink commands.
	 */
	genl_unregister_family(&nbd_genl_family);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			pr_err("possibly leaking nbd_config (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			pr_err("possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");