xenbus_dev_frontend.c

/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07  Alex Zeffertt  Replaced /proc/xen/xenbus with xenfs filesystem
 *                            and /proc/xen compatibility mount point.
 *                            Turned xenfs into a loadable module.
 */
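/*
 * Illustration only (not part of the driver): a minimal sketch of how a
 * userspace client might talk to this device. The device node path and the
 * "domid" key are assumptions for the example; the wire format, a struct
 * xsd_sockmsg header followed by hdr.len payload bytes, is what
 * xenbus_file_write() and xenbus_file_read() below implement.
 *
 *	int fd = open("/dev/xen/xenbus", O_RDWR);  // misc device "xen/xenbus"
 *	struct xsd_sockmsg hdr = {
 *		.type = XS_READ,
 *		.req_id = 1,
 *		.tx_id = 0,			// no transaction
 *		.len = sizeof("domid"),		// payload incl. NUL
 *	};
 *	char buf[sizeof(hdr) + sizeof("domid")];
 *	memcpy(buf, &hdr, sizeof(hdr));
 *	memcpy(buf + sizeof(hdr), "domid", sizeof("domid"));
 *	write(fd, buf, sizeof(buf));		// one complete request
 *	read(fd, &hdr, sizeof(hdr));		// reply header, then read
 *						// hdr.len bytes of reply body
 */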
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/workqueue.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

unsigned int xb_dev_generation_id;

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
	unsigned int generation_id;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on. It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode. It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

	struct kref kref;

	struct work_struct wq;
};
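/*
 * Lock-ordering sketch (an illustration of the rule stated above, not extra
 * code): when both mutexes are needed, msgbuffer_mutex is taken first and
 * reply_mutex nests inside it, as in the write path below:
 *
 *	mutex_lock(&u->msgbuffer_mutex);
 *	...build up / act on a complete request...
 *	mutex_lock(&u->reply_mutex);
 *	...queue reply buffers...
 *	mutex_unlock(&u->reply_mutex);
 *	mutex_unlock(&u->msgbuffer_mutex);
 *
 * watch_fired() below takes reply_mutex on its own.
 */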

/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;

		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue. Caller must hold the appropriate lock
 * if the queue is not local. (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}
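/*
 * Typical staging pattern (a sketch of how queue_reply() is used by
 * watch_fired() and xenbus_dev_queue_reply() below):
 *
 *	LIST_HEAD(staging_q);
 *
 *	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
 *	if (!ret)
 *		ret = queue_reply(&staging_q, body, body_len);
 *	if (!ret)
 *		list_splice_tail(&staging_q, &u->read_buffers); // under reply_mutex
 *	else
 *		queue_cleanup(&staging_q);  // nothing partial reaches readers
 */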

/*
 * Free all the read_buffers on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}
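/*
 * Worked example: a fired watch on path "device/vbd/768" registered with
 * token "tok" (both made up for illustration) is queued for the reader as
 * a header
 *
 *	struct xsd_sockmsg { .type = XS_WATCH_EVENT, .len = 19 }
 *
 * followed by the 19 payload bytes "device/vbd/768\0tok\0", i.e. the
 * NUL-terminated path and token back to back.
 */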

static void xenbus_worker(struct work_struct *wq)
{
	struct xenbus_file_priv *u;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	u = container_of(wq, struct xenbus_file_priv, wq);

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */
	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);
}

static void xenbus_file_free(struct kref *kref)
{
	struct xenbus_file_priv *u;

	/*
	 * We might be called in xenbus_thread().
	 * Use workqueue to avoid deadlock.
	 */
	u = container_of(kref, struct xenbus_file_priv, kref);
	schedule_work(&u->wq);
}

static struct xenbus_transaction_holder *xenbus_get_transaction(
	struct xenbus_file_priv *u, uint32_t tx_id)
{
	struct xenbus_transaction_holder *trans;

	list_for_each_entry(trans, &u->transactions, list)
		if (trans->handle.id == tx_id)
			return trans;

	return NULL;
}

void xenbus_dev_queue_reply(struct xb_req_data *req)
{
	struct xenbus_file_priv *u = req->par;
	struct xenbus_transaction_holder *trans = NULL;
	int rc;
	LIST_HEAD(staging_q);

	xs_request_exit(req);

	mutex_lock(&u->msgbuffer_mutex);

	if (req->type == XS_TRANSACTION_START) {
		trans = xenbus_get_transaction(u, 0);
		if (WARN_ON(!trans))
			goto out;
		if (req->msg.type == XS_ERROR) {
			list_del(&trans->list);
			kfree(trans);
		} else {
			rc = kstrtou32(req->body, 10, &trans->handle.id);
			if (WARN_ON(rc))
				goto out;
		}
	} else if (req->type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, req->msg.tx_id);
		if (WARN_ON(!trans))
			goto out;
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_unlock(&u->msgbuffer_mutex);

	mutex_lock(&u->reply_mutex);

	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
	if (!rc)
		rc = queue_reply(&staging_q, req->body, req->msg.len);

	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}

	mutex_unlock(&u->reply_mutex);

	kfree(req->body);
	kfree(req);

	kref_put(&u->kref, xenbus_file_free);

	return;

out:
	mutex_unlock(&u->msgbuffer_mutex);
}

static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	memcpy(&msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	if (!rc)
		kref_put(&u->kref, xenbus_file_free);

	return rc;
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	struct xenbus_transaction_holder *trans = NULL;
	struct {
		struct xsd_sockmsg hdr;
		char body[];
	} *msg = (void *)u->u.buffer;

	if (msg_type == XS_TRANSACTION_START) {
		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
		trans->generation_id = xb_dev_generation_id;
		list_add(&trans->list, &u->transactions);
	} else if (msg->hdr.tx_id != 0 &&
		   !xenbus_get_transaction(u, msg->hdr.tx_id))
		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
	else if (msg_type == XS_TRANSACTION_END &&
		 !(msg->hdr.len == 2 &&
		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
	else if (msg_type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, msg->hdr.tx_id);
		if (trans && trans->generation_id != xb_dev_generation_id) {
			list_del(&trans->list);
			kfree(trans);
			if (!strcmp(msg->body, "T"))
				return xenbus_command_reply(u, XS_ERROR,
							    "EAGAIN");
			else
				return xenbus_command_reply(u,
							    XS_TRANSACTION_END,
							    "OK");
		}
	}

	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
	if (rc && trans) {
		list_del(&trans->list);
		kfree(trans);
	}

out:
	return rc;
}
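/*
 * Illustrative flow of the transaction bookkeeping above: the client writes
 * XS_TRANSACTION_START with tx_id 0; the reply body carries the new
 * transaction id as a decimal string, which xenbus_dev_queue_reply() parses
 * into trans->handle.id via kstrtou32(). The client then sets hdr.tx_id on
 * subsequent requests and finishes with XS_TRANSACTION_END and a 2-byte body
 * of "T" (commit) or "F" (abort), which is exactly what the
 * msg->hdr.len == 2 check above enforces.
 */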

static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;
	LIST_HEAD(staging_q);

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success. Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}
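/*
 * Worked example of the payload layout parsed above: an XS_WATCH request for
 * path "memory/target" with token "w1" (both made up for illustration)
 * carries hdr.len = 17 and the body
 *
 *	"memory/target\0w1\0"
 *
 * The two memchr() checks reject any body that lacks either NUL.
 */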

static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages. If they write an incomplete message we
	 * buffer it up. Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer. We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out;	/* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out;	/* incomplete data portion */

	/*
	 * OK, now we have a complete message. Do something with it.
	 */

	kref_get(&u->kref);

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0) {
		rc = ret;
		kref_put(&u->kref, xenbus_file_free);
	}

	/* Buffered message consumed */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}
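/*
 * Illustration of the buffering behaviour above: one request may legitimately
 * be split across write() calls on the same fd, e.g.
 *
 *	write(fd, &hdr, sizeof(hdr));	// header only: buffered
 *	write(fd, body, hdr.len);	// completes the message: acted on
 *
 * but, as the locking comment notes, nothing stops two writers sharing an fd
 * from interleaving and corrupting each other's partial messages.
 */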

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	stream_open(inode, filp);

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	kref_init(&u->kref);

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);
	INIT_WORK(&u->wq, xenbus_worker);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;

	kref_put(&u->kref, xenbus_file_free);

	return 0;
}

static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}
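/*
 * Illustration: with the poll hook above, a client can block until a reply
 * or watch event is queued, e.g.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// returns once read_buffers is non-empty
 */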

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);