  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2010, Microsoft Corporation.
  4. *
  5. * Authors:
  6. * Haiyang Zhang <haiyangz@microsoft.com>
  7. * Hank Janssen <hjanssen@microsoft.com>
  8. */
  9. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/init.h>
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <linux/sysctl.h>
  15. #include <linux/reboot.h>
  16. #include <linux/hyperv.h>
  17. #include <linux/clockchips.h>
  18. #include <linux/ptp_clock_kernel.h>
  19. #include <asm/mshyperv.h>
  20. #include "hyperv_vmbus.h"
/*
 * Integration-component protocol versions, encoded as (major << 16 | minor).
 * SD = Shutdown, TS = TimeSync, HB = Heartbeat.
 */
#define SD_MAJOR	3
#define SD_MINOR	0
#define SD_MINOR_1	1
#define SD_MINOR_2	2
#define SD_VERSION_3_1	(SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2	(SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION	(SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1	1
#define SD_VERSION_1	(SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR	4
#define TS_MINOR	0
#define TS_VERSION	(TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1	1
#define TS_VERSION_1	(TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3	3
#define TS_VERSION_3	(TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR	3
#define HB_MINOR	0
#define HB_VERSION	(HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1	1
#define HB_VERSION_1	(HB_MAJOR_1 << 16 | HB_MINOR)

/* Versions actually negotiated with the host (set by the channel callbacks). */
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

/* Supported versions offered to the host, newest first. */
#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

/* Framework (negotiation) versions offered alongside the service versions. */
#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};
/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
	struct work_struct work;
	struct hv_device *dev;	/* device whose kobject receives the uevent */
};

static struct hibernate_work_context hibernate_context;
/* Cached result of hv_is_hibernation_supported(); set in hv_shutdown_init(). */
static bool hibernation_supported;

/*
 * Work handler: emit EVENT=hibernate on the shutdown device so userspace
 * (udev rules) can trigger the actual hibernation.
 */
static void send_hibernate_uevent(struct work_struct *work)
{
	char *uevent_env[2] = { "EVENT=hibernate", NULL };
	struct hibernate_work_context *ctx;

	ctx = container_of(work, struct hibernate_work_context, work);

	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);

	pr_info("Sent hibernation uevent\n");
}
  85. static int hv_shutdown_init(struct hv_util_service *srv)
  86. {
  87. struct vmbus_channel *channel = srv->channel;
  88. INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
  89. hibernate_context.dev = channel->device_obj;
  90. hibernation_supported = hv_is_hibernation_supported();
  91. return 0;
  92. }
/* Shutdown service: handles shutdown/reboot/hibernate requests from the host. */
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
	.util_init = hv_shutdown_init,
};

/* TimeSync service: disciplines the guest clock from host time samples. */
static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);

static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
	.util_init = hv_timesync_init,
	.util_pre_suspend = hv_timesync_pre_suspend,
	.util_deinit = hv_timesync_deinit,
};

/* Heartbeat service: answers the host's periodic liveness probes. */
static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

/* KVP (key-value pair) service; hv_kvp_* handlers are external to this file. */
static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
	.util_init_transport = hv_kvp_init_transport,
	.util_pre_suspend = hv_kvp_pre_suspend,
	.util_pre_resume = hv_kvp_pre_resume,
	.util_deinit = hv_kvp_deinit,
};

/* VSS (backup/snapshot) service; hv_vss_* handlers are external to this file. */
static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
	.util_init_transport = hv_vss_init_transport,
	.util_pre_suspend = hv_vss_pre_suspend,
	.util_pre_resume = hv_vss_pre_resume,
	.util_deinit = hv_vss_deinit,
};
/*
 * The actual power transitions are deferred to a workqueue so they run in
 * thread context rather than in the channel callback (see the DECLARE_WORK
 * comments below).
 */
static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);
/*
 * Channel callback for the shutdown service.
 *
 * Reads one packet from the channel, handles either version negotiation or
 * a shutdown/reboot/hibernate request, echoes the packet back as a response,
 * and only then schedules the requested operation (if any) on a workqueue.
 */
static void shutdown_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	struct work_struct *work = NULL;
	u32 recvlen;
	u64 requestid;
	u8  *shut_txf_buf = util_shutdown.recv_buffer;
	struct shutdown_msg_data *shutdown_msg;
	struct icmsg_hdr *icmsghdrp;

	if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
		pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
		return;
	}

	/* No packet pending on the channel. */
	if (!recvlen)
		return;

	/* Ensure recvlen is big enough to read header data */
	if (recvlen < ICMSG_HDR) {
		pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
				   recvlen);
		return;
	}

	/* The icmsg header follows the pipe header at the start of the buffer. */
	icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		/* Version negotiation: the response is built in place. */
		if (vmbus_prep_negotiate_resp(icmsghdrp,
				shut_txf_buf, recvlen,
				fw_versions, FW_VER_COUNT,
				sd_versions, SD_VER_COUNT,
				NULL, &sd_srv_version)) {
			pr_info("Shutdown IC version %d.%d\n",
				sd_srv_version >> 16,
				sd_srv_version & 0xFFFF);
		}
	} else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
		/* Ensure recvlen is big enough to contain shutdown_msg_data struct */
		if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
			pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
					   recvlen);
			return;
		}

		shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];

		/*
		 * shutdown_msg->flags can be 0(shut down), 2(reboot),
		 * or 4(hibernate). It may bitwise-OR 1, which means
		 * performing the request by force. Linux always tries
		 * to perform the request by force.
		 */
		switch (shutdown_msg->flags) {
		case 0:
		case 1:
			icmsghdrp->status = HV_S_OK;
			work = &shutdown_work;
			pr_info("Shutdown request received - graceful shutdown initiated\n");
			break;
		case 2:
		case 3:
			icmsghdrp->status = HV_S_OK;
			work = &restart_work;
			pr_info("Restart request received - graceful restart initiated\n");
			break;
		case 4:
		case 5:
			pr_info("Hibernation request received\n");
			/* Refuse the request if the platform cannot hibernate. */
			icmsghdrp->status = hibernation_supported ?
				HV_S_OK : HV_E_FAIL;
			if (hibernation_supported)
				work = &hibernate_context.work;
			break;
		default:
			icmsghdrp->status = HV_E_FAIL;
			pr_info("Shutdown request received - Invalid request\n");
			break;
		}
	} else {
		icmsghdrp->status = HV_E_FAIL;
		pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
				   icmsghdrp->icmsgtype);
	}

	/* Mark the packet as a transaction response and echo it to the host. */
	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
		| ICMSGHDRFLAG_RESPONSE;

	vmbus_sendpacket(channel, shut_txf_buf,
			 recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);

	/* Kick off the actual operation only after the response is sent. */
	if (work)
		schedule_work(work);
}
/*
 * Set the host time in a process context.
 */
static struct work_struct adj_time_work;

/*
 * The last time sample, received from the host. PTP device responds to
 * requests by using this data and the current partition-wide time reference
 * count.
 */
static struct {
	u64			host_time;	/* host time at the sample (100ns units) */
	u64			ref_time;	/* reference counter at the sample */
	spinlock_t		lock;		/* protects both fields above */
} host_ts;

/* If set, treat a SAMPLE message as a SYNC when the guest clock is behind. */
static bool timesync_implicit;
module_param(timesync_implicit, bool, 0644);
MODULE_PARM_DESC(timesync_implicit, "If set treat SAMPLE as SYNC when clock is behind");

/*
 * Convert a Hyper-V reference time (100ns units) to nanoseconds.
 * NOTE(review): WLTIMEDELTA is defined elsewhere; presumably it is the
 * Windows-to-Unix epoch offset in 100ns units -- confirm in the header.
 */
static inline u64 reftime_to_ns(u64 reftime)
{
	return (reftime - WLTIMEDELTA) * 100;
}

/*
 * Hard coded threshold for host timesync delay: 600 seconds
 */
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;
/*
 * Compute the current host time by extrapolating from the last sample:
 * host_time + (now - ref_time), then convert to a timespec64.
 *
 * Returns 0 on success, or -ESTALE (while still filling in *ts) when the
 * last sample from the host is older than the allowed threshold.
 */
static int hv_get_adj_host_time(struct timespec64 *ts)
{
	u64 newtime, reftime, timediff_adj;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hv_read_reference_counter();

	/*
	 * We need to let the caller know that last update from host
	 * is older than the max allowable threshold. clock_gettime()
	 * and PTP ioctl do not have a documented error that we could
	 * return for this specific case. Use ESTALE to report this.
	 */
	timediff_adj = reftime - host_ts.ref_time;
	/* timediff_adj is in 100ns units; scale to ns for the comparison. */
	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
			     (timediff_adj * 100));
		ret = -ESTALE;
	}

	newtime = host_ts.host_time + timediff_adj;
	*ts = ns_to_timespec64(reftime_to_ns(newtime));
	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}
  278. static void hv_set_host_time(struct work_struct *work)
  279. {
  280. struct timespec64 ts;
  281. if (!hv_get_adj_host_time(&ts))
  282. do_settimeofday64(&ts);
  283. }
  284. /*
  285. * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
  286. * Force a sync if the guest is behind.
  287. */
  288. static inline bool hv_implicit_sync(u64 host_time)
  289. {
  290. struct timespec64 new_ts;
  291. struct timespec64 threshold_ts;
  292. new_ts = ns_to_timespec64(reftime_to_ns(host_time));
  293. ktime_get_real_ts64(&threshold_ts);
  294. threshold_ts.tv_sec += 5;
  295. /*
  296. * If guest behind the host by 5 or more seconds.
  297. */
  298. if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
  299. return true;
  300. return false;
  301. }
/*
 * Synchronize time with host after reboot, restore, etc.
 *
 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
 * typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
 */
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
	unsigned long flags;
	u64 cur_reftime;

	/*
	 * Save the adjusted time sample from the host and the snapshot
	 * of the current system time.
	 */
	spin_lock_irqsave(&host_ts.lock, flags);

	cur_reftime = hv_read_reference_counter();
	host_ts.host_time = hosttime;
	host_ts.ref_time = cur_reftime;

	/*
	 * TimeSync v4 messages contain reference time (guest's Hyper-V
	 * clocksource read when the time sample was generated), we can
	 * improve the precision by adding the delta between now and the
	 * time of generation. For older protocols we set
	 * reftime == cur_reftime on call.
	 */
	host_ts.host_time += (cur_reftime - reftime);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	/* Schedule work to do do_settimeofday64() */
	/*
	 * NOTE(review): host_ts.host_time is read here after the unlock;
	 * this function (called from the channel callback) appears to be
	 * the only writer, so the read looks benign -- confirm.
	 */
	if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
	    (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
		schedule_work(&adj_time_work);
}
/*
 * Time Sync Channel message handler.
 *
 * Drains every pending packet; each one is either a version negotiation or
 * a time sample. The negotiated version (ts_srv_version) decides which
 * payload layout the host sends: v4+ carries ictimesync_ref_data (with the
 * guest reference time), older versions carry ictimesync_data.
 * Each packet is echoed back as a response.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
	struct ictimesync_ref_data *refdata;
	u8 *time_txf_buf = util_timesynch.recv_buffer;

	/*
	 * Drain the ring buffer and use the last packet to update
	 * host_ts
	 */
	while (1) {
		int ret = vmbus_recvpacket(channel, time_txf_buf,
					   HV_HYP_PAGE_SIZE, &recvlen,
					   &requestid);
		if (ret) {
			pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
					   ret);
			break;
		}

		if (!recvlen)
			break;

		/* Ensure recvlen is big enough to read header data */
		if (recvlen < ICMSG_HDR) {
			pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
					   recvlen);
			break;
		}

		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
						time_txf_buf, recvlen,
						fw_versions, FW_VER_COUNT,
						ts_versions, TS_VER_COUNT,
						NULL, &ts_srv_version)) {
				pr_info("TimeSync IC version %d.%d\n",
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
			if (ts_srv_version > TS_VERSION_3) {
				/* Ensure recvlen is big enough to read ictimesync_ref_data */
				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
					pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
							   recvlen);
					break;
				}
				refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];

				adj_guesttime(refdata->parenttime,
					      refdata->vmreferencetime,
					      refdata->flags);
			} else {
				/* Ensure recvlen is big enough to read ictimesync_data */
				if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
					pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
							   recvlen);
					break;
				}
				timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];

				/* Old protocol: no guest reftime; use "now". */
				adj_guesttime(timedatap->parenttime,
					      hv_read_reference_counter(),
					      timedatap->flags);
			}
		} else {
			icmsghdrp->status = HV_E_FAIL;
			pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
					   icmsghdrp->icmsgtype);
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, time_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}
/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V send us a heartbeat request message.
 * we respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

	/* Drain the channel; answer each pending heartbeat/negotiate packet. */
	while (1) {

		if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
				     &recvlen, &requestid)) {
			pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
			return;
		}

		if (!recvlen)
			break;

		/* Ensure recvlen is big enough to read header data */
		if (recvlen < ICMSG_HDR) {
			pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
					   recvlen);
			break;
		}

		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
					hbeat_txf_buf, recvlen,
					fw_versions, FW_VER_COUNT,
					hb_versions, HB_VER_COUNT,
					NULL, &hb_srv_version)) {

				pr_info("Heartbeat IC version %d.%d\n",
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
		} else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
			/*
			 * Ensure recvlen is big enough to read seq_num. Reserved area is not
			 * included in the check as the host may not fill it up entirely
			 */
			if (recvlen < ICMSG_HDR + sizeof(u64)) {
				pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
						   recvlen);
				break;
			}
			heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];

			/* Bump the sequence number to prove we are alive. */
			heartbeat_msg->seq_num += 1;
		} else {
			icmsghdrp->status = HV_E_FAIL;
			pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
					   icmsghdrp->icmsgtype);
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, hbeat_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}
/* Ring buffers sized as 3 hypervisor pages each (VMBUS_RING_SIZE adds the
 * ring-buffer header overhead -- see the hyperv headers). */
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)

/*
 * Probe: bind the hv_util_service identified by the device id, allocate its
 * receive buffer, run the service's init hooks, and open the channel.
 *
 * Returns 0 on success; -ENOMEM, -ENODEV (any util_init failure is collapsed
 * to -ENODEV), or the vmbus_open()/util_init_transport() error otherwise.
 */
static int util_probe(struct hv_device *dev,
		      const struct hv_vmbus_device_id *dev_id)
{
	/* The matching service descriptor is stashed in driver_data. */
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

	/* Receive buffer for inbound packets (4 pages). */
	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
	if (!srv->recv_buffer)
		return -ENOMEM;
	srv->channel = dev->channel;
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
			ret = -ENODEV;
			goto error1;
		}
	}

	/*
	 * The set of services managed by the util driver are not performance
	 * critical and do not need batched reading. Furthermore, some services
	 * such as KVP can only handle one message from the host at a time.
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

	hv_set_drvdata(dev, srv);

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	if (ret)
		goto error;

	if (srv->util_init_transport) {
		ret = srv->util_init_transport();
		if (ret) {
			vmbus_close(dev->channel);
			goto error;
		}
	}
	return 0;

error:
	/* Undo util_init() before freeing the buffer. */
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
}
  533. static void util_remove(struct hv_device *dev)
  534. {
  535. struct hv_util_service *srv = hv_get_drvdata(dev);
  536. if (srv->util_deinit)
  537. srv->util_deinit();
  538. vmbus_close(dev->channel);
  539. kfree(srv->recv_buffer);
  540. }
  541. /*
  542. * When we're in util_suspend(), all the userspace processes have been frozen
  543. * (refer to hibernate() -> freeze_processes()). The userspace is thawed only
  544. * after the whole resume procedure, including util_resume(), finishes.
  545. */
  546. static int util_suspend(struct hv_device *dev)
  547. {
  548. struct hv_util_service *srv = hv_get_drvdata(dev);
  549. int ret = 0;
  550. if (srv->util_pre_suspend) {
  551. ret = srv->util_pre_suspend();
  552. if (ret)
  553. return ret;
  554. }
  555. vmbus_close(dev->channel);
  556. return 0;
  557. }
  558. static int util_resume(struct hv_device *dev)
  559. {
  560. struct hv_util_service *srv = hv_get_drvdata(dev);
  561. int ret = 0;
  562. if (srv->util_pre_resume) {
  563. ret = srv->util_pre_resume();
  564. if (ret)
  565. return ret;
  566. }
  567. ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
  568. HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
  569. dev->channel);
  570. return ret;
  571. }
/*
 * Device id table: maps each utility GUID to its service descriptor
 * (retrieved from driver_data in util_probe()).
 */
static const struct hv_vmbus_device_id id_table[] = {
	/* Shutdown guid */
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
	/* Time synch guid */
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
	/* Heartbeat guid */
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
	/* KVP guid */
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
	{ },	/* terminator */
};

MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one */
static struct hv_driver util_drv = {
	.name = "hv_utils",
	.id_table = id_table,
	.probe =  util_probe,
	.remove =  util_remove,
	.suspend = util_suspend,
	.resume =  util_resume,
	.driver = {
		/* Async probe: don't block boot on channel setup. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * PTP clock interface. The Hyper-V time source is read-only from the
 * guest's perspective: only gettime64 is functional; every set/adjust
 * operation is rejected with -EOPNOTSUPP.
 */
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfine(struct ptp_clock_info *ptp, long delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

/* Read the extrapolated host time; may return -ESTALE (see above). */
static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	return hv_get_adj_host_time(ts);
}

static struct ptp_clock_info ptp_hyperv_info = {
	.name		= "hyperv",
	.enable		= hv_ptp_enable,
	.adjtime	= hv_ptp_adjtime,
	.adjfine	= hv_ptp_adjfine,
	.gettime64	= hv_ptp_gettime,
	.settime64	= hv_ptp_settime,
	.owner		= THIS_MODULE,
};

/* Registered PTP clock, or NULL when registration failed/was compiled out. */
static struct ptp_clock *hv_ptp_clock;
  639. static int hv_timesync_init(struct hv_util_service *srv)
  640. {
  641. spin_lock_init(&host_ts.lock);
  642. INIT_WORK(&adj_time_work, hv_set_host_time);
  643. /*
  644. * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
  645. * disabled but the driver is still useful without the PTP device
  646. * as it still handles the ICTIMESYNCFLAG_SYNC case.
  647. */
  648. hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
  649. if (IS_ERR_OR_NULL(hv_ptp_clock)) {
  650. pr_err("cannot register PTP clock: %d\n",
  651. PTR_ERR_OR_ZERO(hv_ptp_clock));
  652. hv_ptp_clock = NULL;
  653. }
  654. return 0;
  655. }
/* Cancel any pending time-set work and wait for a running instance to finish. */
static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}

/* Suspend hook: ensure no time-set work runs while the channel is closed. */
static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}

/* Teardown: unregister the PTP clock (if registered) and flush pending work. */
static void hv_timesync_deinit(void)
{
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);

	hv_timesync_cancel_work();
}
/* Module entry: register the vmbus driver for all five utility GUIDs. */
static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

/* Module exit: unregister the vmbus driver. */
static void exit_hyperv_utils(void)
{
	pr_info("De-Registered HyperV Utility Driver\n");

	vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");