/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include "hyperv_vmbus.h"
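
/*
 * A dynamically-added device ID, created through the driver's sysfs
 * "new_id" file.  Dynamic IDs are matched ahead of the driver's static
 * id_table in hv_vmbus_get_id() below.
 */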
struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
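
/*
 * Returns nonzero when the panic should be reported through the crash
 * MSRs directly, i.e. when the kmsg-dump path below will not be sending
 * the panic message to Hyper-V (reporting disabled or no page allocated).
 */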
static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic.  If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	/*
	 * Hyper-V should be notified only once about a panic.  If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
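/* Serializes walks and updates of the hyperv_mmio resource list above. */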
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
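
/*
 * Monitor-page helpers: a channel's monitor ID selects one bit within a
 * trigger group of 32 entries.  These helpers compute the group and offset
 * used to index the shared monitor pages.  By the convention used in the
 * sysfs attributes below, the "server" attributes read monitor_pages[0]
 * and the "client" attributes read monitor_pages[1].
 */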
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);
static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}
static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
					const uuid_le *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	if (id)
		return id;

	id = drv->id_table;
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	uuid_le guid;
	ssize_t retval;

	retval = uuid_le_to_bin(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_get_id(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * store_remove_id - remove a device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	uuid_le guid;
	ssize_t retval;

	retval = uuid_le_to_bin(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (!uuid_le_cmp(id->guid, guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel->offermsg.child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
};
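
/*
 * Deferred message handling: host messages whose handlers may block (e.g.
 * channel offers and rescinds) are copied out of the SynIC message page
 * into an onmessage_work_context and handed to a work queue, so the heavy
 * lifting runs in process context rather than in the message DPC.
 */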
struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg,
					struct hv_per_cpu_context *hv_cpu)
{
	struct clock_event_device *dev = hv_cpu->clk_evt;

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	trace_vmbus_on_msg_dpc(hdr);

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (!entry->message_handler)
		goto msg_handled;

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by ensuring the processing is done on the
		 * same CPU.
		 */
		switch (hdr->msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message;
			 * schedule the work on the global work queue.
			 */
			schedule_work_on(vmbus_connection.connect_cpu,
					 &ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			atomic_inc(&vmbus_connection.offer_in_progress);
			queue_work_on(vmbus_connection.connect_cpu,
				      vmbus_connection.work_queue,
				      &ctx->work);
			break;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
/*
 * Direct callback for channels using other deferred processing
 */
static void vmbus_channel_isr(struct vmbus_channel *channel)
{
	void (*callback_fn)(void *);

	callback_fn = READ_ONCE(channel->onchannel_callback);
	if (likely(callback_fn != NULL))
		(*callback_fn)(channel->channel_callback_context);
}

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		rcu_read_lock();

		/* Find channel based on relid */
		list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
			if (channel->offermsg.child_relid != relid)
				continue;

			if (channel->rescind)
				continue;

			trace_vmbus_chan_sched(channel);

			++channel->interrupts;

			switch (channel->callback_mode) {
			case HV_CALL_ISR:
				vmbus_channel_isr(channel);
				break;

			case HV_CALL_BATCHED:
				hv_begin_read(&channel->inbound);
				/* fallthrough */
			case HV_CALL_DIRECT:
				tasklet_schedule(&channel->callback_event);
			}
		}

		rcu_read_unlock();
	}
}
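
/*
 * Top-level handler for the Hyper-V callback interrupt: first dispatch any
 * pending channel events, then kick the message DPC (or handle an expired
 * timer message directly).
 */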
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */
	if ((vmbus_proto_version == VERSION_WS2008) ||
		(vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, hv_cpu);
		else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static struct ctl_table_header *hv_ctl_table_hdr;
static int zero;
static int one = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization...setup hypercall page..etc */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_alloc;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hv_get_crash_ctl(hyperv_crash_ctl);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
			hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
			if (hv_panic_page) {
				ret = kmsg_dump_register(&hv_kmsg_dumper);
				if (ret) {
					pr_err("Hyper-V: kmsg dump register error 0x%x\n",
					       ret);
					free_page(
					    (unsigned long)hv_panic_page);
					hv_panic_page = NULL;
				}
			} else
				pr_err("Hyper-V: panic message page memory allocation failed\n");
		}

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
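
/*
 * Typical usage (illustrative sketch only; the "sample" names below are
 * hypothetical): a client driver fills in an hv_driver and registers it
 * through the vmbus_driver_register() wrapper from <linux/hyperv.h>, which
 * supplies THIS_MODULE and KBUILD_MODNAME to __vmbus_driver_register():
 *
 *	static struct hv_driver sample_drv = {
 *		.name = "sample",
 *		.id_table = sample_id_table,
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *	};
 *
 *	static int __init sample_init(void)
 *	{
 *		return vmbus_driver_register(&sample_drv);
 *	}
 */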
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	const struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	if (chan->state != CHANNEL_OPENED_STATE)
		return -EINVAL;

	return attribute->show(chan, buf);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
};
static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->outbound;

	return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->inbound;

	return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->inbound;

	return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->outbound;

	return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);

static ssize_t channel_pending_show(const struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(const struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);

static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
	.default_attrs = vmbus_chan_attrs,
};
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
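/*
 * Fixed guest-physical base of the virtual TPM (the standard TPM TIS
 * address); MMIO ranges that would overlap it are truncated in
 * vmbus_walk_resources() below.
 */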
  1335. #define VTPM_BASE_ADDRESS 0xfed40000
  1336. static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
  1337. {
  1338. resource_size_t start = 0;
  1339. resource_size_t end = 0;
  1340. struct resource *new_res;
  1341. struct resource **old_res = &hyperv_mmio;
  1342. struct resource **prev_res = NULL;
  1343. switch (res->type) {
  1344. /*
  1345. * "Address" descriptors are for bus windows. Ignore
  1346. * "memory" descriptors, which are for registers on
  1347. * devices.
  1348. */
  1349. case ACPI_RESOURCE_TYPE_ADDRESS32:
  1350. start = res->data.address32.address.minimum;
  1351. end = res->data.address32.address.maximum;
  1352. break;
  1353. case ACPI_RESOURCE_TYPE_ADDRESS64:
  1354. start = res->data.address64.address.minimum;
  1355. end = res->data.address64.address.maximum;
  1356. break;
  1357. default:
  1358. /* Unused resource type */
  1359. return AE_OK;
  1360. }
  1361. /*
  1362. * Ignore ranges that are below 1MB, as they're not
  1363. * necessary or useful here.
  1364. */
  1365. if (end < 0x100000)
  1366. return AE_OK;
  1367. new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
  1368. if (!new_res)
  1369. return AE_NO_MEMORY;
  1370. /* If this range overlaps the virtual TPM, truncate it. */
  1371. if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
  1372. end = VTPM_BASE_ADDRESS;
  1373. new_res->name = "hyperv mmio";
  1374. new_res->flags = IORESOURCE_MEM;
  1375. new_res->start = start;
  1376. new_res->end = end;
  1377. /*
  1378. * If two ranges are adjacent, merge them.
  1379. */
  1380. do {
  1381. if (!*old_res) {
  1382. *old_res = new_res;
  1383. break;
  1384. }
  1385. if (((*old_res)->end + 1) == new_res->start) {
  1386. (*old_res)->end = new_res->end;
  1387. kfree(new_res);
  1388. break;
  1389. }
  1390. if ((*old_res)->start == new_res->end + 1) {
  1391. (*old_res)->start = new_res->start;
  1392. kfree(new_res);
  1393. break;
  1394. }
  1395. if ((*old_res)->start > new_res->end) {
  1396. new_res->sibling = *old_res;
  1397. if (prev_res)
  1398. (*prev_res)->sibling = new_res;
  1399. *old_res = new_res;
  1400. break;
  1401. }
  1402. prev_res = old_res;
  1403. old_res = &(*old_res)->sibling;
  1404. } while (1);
  1405. return AE_OK;
  1406. }
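
/*
 * Illustrative standalone model of the insert/merge loop above (plain
 * userspace C, not kernel code): ranges arrive in arbitrary order and are
 * kept in a singly linked list sorted by start address, with back-to-back
 * ranges coalesced. For example, inserting [0x200000, 0x2fffff] and then
 * [0x100000, 0x1fffff] yields the single node [0x100000, 0x2fffff].
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct range {
 *		unsigned long start, end;
 *		struct range *sibling;
 *	};
 *
 *	static struct range *head;
 *
 *	// Mirror of the do/while loop above: keep the list sorted by
 *	// start address and coalesce ranges that touch end-to-start.
 *	static void insert_range(unsigned long start, unsigned long end)
 *	{
 *		struct range **old = &head;
 *		struct range **prev = NULL;
 *		struct range *new = calloc(1, sizeof(*new));
 *
 *		if (!new)
 *			return;
 *		new->start = start;
 *		new->end = end;
 *		for (;;) {
 *			if (!*old) {			// empty slot: append
 *				*old = new;
 *				return;
 *			}
 *			if ((*old)->end + 1 == new->start) {	// extend right
 *				(*old)->end = new->end;
 *				free(new);
 *				return;
 *			}
 *			if ((*old)->start == new->end + 1) {	// extend left
 *				(*old)->start = new->start;
 *				free(new);
 *				return;
 *			}
 *			if ((*old)->start > new->end) {	// insert before *old
 *				new->sibling = *old;
 *				if (prev)
 *					(*prev)->sibling = new;
 *				*old = new;
 *				return;
 *			}
 *			prev = old;
 *			old = &(*old)->sibling;
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		struct range *r;
 *
 *		insert_range(0x200000, 0x2fffff);
 *		insert_range(0x100000, 0x1fffff);	// merges with the above
 *		for (r = head; r; r = r->sibling)
 *			printf("[%#lx, %#lx]\n", r->start, r->end);
 *		return 0;	// prints the single range [0x100000, 0x2fffff]
 *	}
 */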

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */
	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
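
/*
 * Worked example of the shrink loop above, assuming a Generation 1 VM
 * whose firmware reports lfb_size as only 8 MB: the first attempt tries
 * max(8 MB, 64 MB) = 64 MB; if that window collides with an existing
 * reservation, the size is halved (32 MB, 16 MB, ...) until
 * __request_region() succeeds or the size drops below 1 MB. A standalone
 * model of the loop, where try_reserve() is a hypothetical stand-in for
 * __request_region():
 *
 *	static int try_reserve(unsigned long base, unsigned long size);
 *
 *	static unsigned long reserve_fb(unsigned long base,
 *					unsigned long reported)
 *	{
 *		unsigned long size = reported > 0x4000000 ?
 *				     reported : 0x4000000;
 *
 *		for (; size >= 0x100000; size >>= 1)
 *			if (try_reserve(base, size))
 *				return size;	// amount actually reserved
 *		return 0;			// nothing could be reserved
 *	}
 */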

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge", whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
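
/*
 * Illustrative caller sketch (hypothetical driver code, modeled on the
 * in-tree hv_pci usage): request a 4 KB window, 4 KB-aligned, anywhere in
 * guest physical address space; "hdev" is the caller's hv_device and
 * "map" a void __iomem pointer:
 *
 *	struct resource *mmio;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&mmio, hdev, 0, (resource_size_t)-1,
 *				  0x1000, 0x1000, false);
 *	if (ret)
 *		return ret;
 *
 *	map = ioremap(mmio->start, resource_size(mmio));
 *
 * The window is returned to the pool with
 * vmbus_free_mmio(mmio->start, resource_size(mmio)) on the error path or
 * at remove time; see the sketch after vmbus_free_mmio() below.
 */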

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
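
/*
 * Illustrative remove-path sketch (hypothetical driver code, pairing with
 * the allocation sketch above): whatever a driver obtained from
 * vmbus_allocate_mmio() is released with the same base/size pair,
 * typically from the driver's remove() callback. The sample_priv
 * structure and its fields are placeholders:
 *
 *	static int sample_remove(struct hv_device *hdev)
 *	{
 *		struct sample_priv *priv = hv_get_drvdata(hdev);
 *
 *		iounmap(priv->regs);
 *		vmbus_free_mmio(priv->mmio->start,
 *				resource_size(priv->mmio));
 *		return 0;
 *	}
 */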

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the VMBus ACPI device (Gen1 or Gen2
	 * firmware) is the VMOD that has the MMIO ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	vmbus_connection.conn_state = DISCONNECTED;
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In a crash handler we can't schedule synic cleanup for all CPUs,
	 * so do the cleanup for the current CPU only. This should be
	 * sufficient for kdump.
	 */
	hv_synic_cleanup(smp_processor_id());
	hyperv_cleanup();
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
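
/*
 * The init/probe handshake above is the standard completion pattern:
 * hv_acpi_init() registers the ACPI driver, then blocks on probe_event
 * until vmbus_acpi_add() signals it (or five seconds pass), which
 * guarantees the MMIO ranges have been gathered before vmbus_bus_init()
 * runs. In outline:
 *
 *	init_completion(&probe_event);		// initcall context
 *	acpi_bus_register_driver(...);		// may probe immediately
 *	t = wait_for_completion_timeout(&probe_event, 5 * HZ);
 *	if (t == 0)				// probe never ran
 *		return -ETIMEDOUT;
 *
 *	complete(&probe_event);			// probe context; see
 *						// vmbus_acpi_add() above
 */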

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	vmbus_free_channels();

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}

MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);