// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

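/*
 * Each util protocol version is encoded as (major << 16) | minor. The
 * arrays below list the versions the guest offers to the host, newest
 * first; vmbus_prep_negotiate_resp() picks the highest version that
 * both sides support.
 */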
#define SD_MAJOR 3
#define SD_MINOR 0
#define SD_MINOR_1 1
#define SD_MINOR_2 2
#define SD_VERSION_3_1 (SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2 (SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1 1
#define SD_VERSION_1 (SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR 4
#define TS_MINOR 0
#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1 1
#define TS_VERSION_1 (TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3 3
#define TS_VERSION_3 (TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR 3
#define HB_MINOR 0
#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1 1
#define HB_VERSION_1 (HB_MAJOR_1 << 16 | HB_MINOR)

static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

#define SD_VER_COUNT 4
static const int sd_versions[] = {
	SD_VERSION_3_2,
	SD_VERSION_3_1,
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

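/*
 * Framework versions offered during negotiation: UTIL_FW_VERSION for
 * modern hosts, UTIL_WS2K8_FW_VERSION for older (Windows Server 2008)
 * hosts.
 */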
#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};

/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
	struct work_struct work;
	struct hv_device *dev;
};

static struct hibernate_work_context hibernate_context;
static bool hibernation_supported;

static void send_hibernate_uevent(struct work_struct *work)
{
	char *uevent_env[2] = { "EVENT=hibernate", NULL };
	struct hibernate_work_context *ctx;

	ctx = container_of(work, struct hibernate_work_context, work);

	kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);

	pr_info("Sent hibernation uevent\n");
}

static int hv_shutdown_init(struct hv_util_service *srv)
{
	struct vmbus_channel *channel = srv->channel;

	INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
	hibernate_context.dev = channel->device_obj;

	hibernation_supported = hv_is_hibernation_supported();

	return 0;
}

static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
	.util_cb = shutdown_onchannelcallback,
	.util_init = hv_shutdown_init,
};

static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);

static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
	.util_cb = timesync_onchannelcallback,
	.util_init = hv_timesync_init,
	.util_pre_suspend = hv_timesync_pre_suspend,
	.util_deinit = hv_timesync_deinit,
};

static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
	.util_cb = heartbeat_onchannelcallback,
};

static struct hv_util_service util_kvp = {
	.util_cb = hv_kvp_onchannelcallback,
	.util_init = hv_kvp_init,
	.util_pre_suspend = hv_kvp_pre_suspend,
	.util_pre_resume = hv_kvp_pre_resume,
	.util_deinit = hv_kvp_deinit,
};

static struct hv_util_service util_vss = {
	.util_cb = hv_vss_onchannelcallback,
	.util_init = hv_vss_init,
	.util_pre_suspend = hv_vss_pre_suspend,
	.util_pre_resume = hv_vss_pre_resume,
	.util_deinit = hv_vss_deinit,
};

static struct hv_util_service util_fcopy = {
	.util_cb = hv_fcopy_onchannelcallback,
	.util_init = hv_fcopy_init,
	.util_pre_suspend = hv_fcopy_pre_suspend,
	.util_pre_resume = hv_fcopy_pre_resume,
	.util_deinit = hv_fcopy_deinit,
};

static void perform_shutdown(struct work_struct *dummy)
{
	orderly_poweroff(true);
}

static void perform_restart(struct work_struct *dummy)
{
	orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);

static void shutdown_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	struct work_struct *work = NULL;
	u32 recvlen;
	u64 requestid;
	u8 *shut_txf_buf = util_shutdown.recv_buffer;

	struct shutdown_msg_data *shutdown_msg;

	struct icmsg_hdr *icmsghdrp;

	vmbus_recvpacket(channel, shut_txf_buf,
			 HV_HYP_PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
						      fw_versions, FW_VER_COUNT,
						      sd_versions, SD_VER_COUNT,
						      NULL, &sd_srv_version)) {
				pr_info("Shutdown IC version %d.%d\n",
					sd_srv_version >> 16,
					sd_srv_version & 0xFFFF);
			}
		} else {
			shutdown_msg =
				(struct shutdown_msg_data *)&shut_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

			/*
			 * shutdown_msg->flags can be 0 (shut down), 2 (reboot)
			 * or 4 (hibernate). The lowest bit may be ORed in to
			 * request that the action be performed by force;
			 * Linux always performs the request by force anyway.
			 */
			switch (shutdown_msg->flags) {
			case 0:
			case 1:
				icmsghdrp->status = HV_S_OK;
				work = &shutdown_work;
				pr_info("Shutdown request received - graceful shutdown initiated\n");
				break;
			case 2:
			case 3:
				icmsghdrp->status = HV_S_OK;
				work = &restart_work;
				pr_info("Restart request received - graceful restart initiated\n");
				break;
			case 4:
			case 5:
				pr_info("Hibernation request received\n");
				icmsghdrp->status = hibernation_supported ?
					HV_S_OK : HV_E_FAIL;
				if (hibernation_supported)
					work = &hibernate_context.work;
				break;
			default:
				icmsghdrp->status = HV_E_FAIL;
				pr_info("Shutdown request received - Invalid request\n");
				break;
			}
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, shut_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}

	if (work)
		schedule_work(work);
}

/*
 * Set the host time in a process context.
 */
static struct work_struct adj_time_work;

/*
 * The last time sample received from the host. The PTP device responds
 * to requests using this data and the current partition-wide time
 * reference count.
 */
static struct {
	u64 host_time;
	u64 ref_time;
	spinlock_t lock;
} host_ts;

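/*
 * Hyper-V timestamps are in 100ns units counted from the Windows epoch
 * (1601-01-01); WLTIMEDELTA is that epoch's offset from the Unix epoch
 * (1970-01-01) in the same units, so subtracting it and scaling by 100
 * yields Unix time in nanoseconds.
 */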
static inline u64 reftime_to_ns(u64 reftime)
{
	return (reftime - WLTIMEDELTA) * 100;
}

/*
 * Hard coded threshold for host timesync delay: 600 seconds
 */
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;

static int hv_get_adj_host_time(struct timespec64 *ts)
{
	u64 newtime, reftime, timediff_adj;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hv_read_reference_counter();

	/*
	 * We need to let the caller know that the last update from the
	 * host is older than the maximum allowable threshold.
	 * clock_gettime() and the PTP ioctl do not have a documented
	 * error that we could return for this specific case. Use ESTALE
	 * to report it.
	 */
	timediff_adj = reftime - host_ts.ref_time;
	if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
		pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
			     (timediff_adj * 100));
		ret = -ESTALE;
	}

	newtime = host_ts.host_time + timediff_adj;
	*ts = ns_to_timespec64(reftime_to_ns(newtime));
	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}

static void hv_set_host_time(struct work_struct *work)
{
	struct timespec64 ts;

	if (!hv_get_adj_host_time(&ts))
		do_settimeofday64(&ts);
}

/*
 * Synchronize time with host after reboot, restore, etc.
 *
 * ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
 * After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from host. This is
 * typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
 */
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
	unsigned long flags;
	u64 cur_reftime;

	/*
	 * Save the adjusted time sample from the host and the snapshot
	 * of the current system time.
	 */
	spin_lock_irqsave(&host_ts.lock, flags);

	cur_reftime = hv_read_reference_counter();
	host_ts.host_time = hosttime;
	host_ts.ref_time = cur_reftime;

	/*
	 * TimeSync v4 messages contain reference time (guest's Hyper-V
	 * clocksource read when the time sample was generated); we can
	 * improve the precision by adding the delta between now and the
	 * time of generation. For older protocols we set
	 * reftime == cur_reftime on call.
	 */
	host_ts.host_time += (cur_reftime - reftime);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	/* Schedule work to call do_settimeofday64() */
	if (adj_flags & ICTIMESYNCFLAG_SYNC)
		schedule_work(&adj_time_work);
}

/*
 * Time Sync Channel message handler.
 */
static void timesync_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct ictimesync_data *timedatap;
	struct ictimesync_ref_data *refdata;
	u8 *time_txf_buf = util_timesynch.recv_buffer;

	/*
	 * Drain the ring buffer and use the last packet to update
	 * host_ts
	 */
	while (1) {
		int ret = vmbus_recvpacket(channel, time_txf_buf,
					   HV_HYP_PAGE_SIZE, &recvlen,
					   &requestid);
		if (ret) {
			pr_warn_once("TimeSync IC pkt recv failed (Err: %d)\n",
				     ret);
			break;
		}

		if (!recvlen)
			break;

		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
						      fw_versions, FW_VER_COUNT,
						      ts_versions, TS_VER_COUNT,
						      NULL, &ts_srv_version)) {
				pr_info("TimeSync IC version %d.%d\n",
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
		} else {
			if (ts_srv_version > TS_VERSION_3) {
				refdata = (struct ictimesync_ref_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

				adj_guesttime(refdata->parenttime,
					      refdata->vmreferencetime,
					      refdata->flags);
			} else {
				timedatap = (struct ictimesync_data *)
					&time_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];
				adj_guesttime(timedatap->parenttime,
					      hv_read_reference_counter(),
					      timedatap->flags);
			}
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, time_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}
/*
 * Heartbeat functionality.
 * Every two seconds Hyper-V sends us a heartbeat request message. We
 * respond to it so that Hyper-V knows the guest is alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

	while (1) {

		vmbus_recvpacket(channel, hbeat_txf_buf,
				 HV_HYP_PAGE_SIZE, &recvlen, &requestid);

		if (!recvlen)
			break;

		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			if (vmbus_prep_negotiate_resp(icmsghdrp,
						      hbeat_txf_buf,
						      fw_versions, FW_VER_COUNT,
						      hb_versions, HB_VER_COUNT,
						      NULL, &hb_srv_version)) {

				pr_info("Heartbeat IC version %d.%d\n",
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
		} else {
			heartbeat_msg =
				(struct heartbeat_msg_data *)&hbeat_txf_buf[
					sizeof(struct vmbuspipe_hdr) +
					sizeof(struct icmsg_hdr)];

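			/*
			 * The response echoes the request packet back to the
			 * host with the sequence number bumped by one.
			 */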
			heartbeat_msg->seq_num += 1;
		}

		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
			| ICMSGHDRFLAG_RESPONSE;

		vmbus_sendpacket(channel, hbeat_txf_buf,
				 recvlen, requestid,
				 VM_PKT_DATA_INBAND, 0);
	}
}

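/*
 * VMBUS_RING_SIZE() rounds the requested three pages of payload up to a
 * page-aligned size that also accommodates the ring buffer header.
 */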
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)

static int util_probe(struct hv_device *dev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct hv_util_service *srv =
		(struct hv_util_service *)dev_id->driver_data;
	int ret;

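	/*
	 * Four pages is generously sized for the largest message any of
	 * the util protocols exchanges with the host.
	 */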
	srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
	if (!srv->recv_buffer)
		return -ENOMEM;
	srv->channel = dev->channel;
	if (srv->util_init) {
		ret = srv->util_init(srv);
		if (ret) {
			ret = -ENODEV;
			goto error1;
		}
	}

	/*
	 * The set of services managed by the util driver is not performance
	 * critical and does not need batched reading. Furthermore, some
	 * services such as KVP can only handle one message from the host at
	 * a time. Turn off batched reading for all util drivers before we
	 * open the channel.
	 */
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

	hv_set_drvdata(dev, srv);

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	if (ret)
		goto error;

	return 0;

error:
	if (srv->util_deinit)
		srv->util_deinit();
error1:
	kfree(srv->recv_buffer);
	return ret;
}

static int util_remove(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);

	if (srv->util_deinit)
		srv->util_deinit();
	vmbus_close(dev->channel);
	kfree(srv->recv_buffer);

	return 0;
}

/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). Userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 */
static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_suspend) {
		ret = srv->util_pre_suspend();
		if (ret)
			return ret;
	}

	vmbus_close(dev->channel);

	return 0;
}

static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret = 0;

	if (srv->util_pre_resume) {
		ret = srv->util_pre_resume();
		if (ret)
			return ret;
	}

	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Shutdown guid */
	{ HV_SHUTDOWN_GUID,
	  .driver_data = (unsigned long)&util_shutdown
	},
	/* Time synch guid */
	{ HV_TS_GUID,
	  .driver_data = (unsigned long)&util_timesynch
	},
	/* Heartbeat guid */
	{ HV_HEART_BEAT_GUID,
	  .driver_data = (unsigned long)&util_heartbeat
	},
	/* KVP guid */
	{ HV_KVP_GUID,
	  .driver_data = (unsigned long)&util_kvp
	},
	/* VSS GUID */
	{ HV_VSS_GUID,
	  .driver_data = (unsigned long)&util_vss
	},
	/* File copy GUID */
	{ HV_FCOPY_GUID,
	  .driver_data = (unsigned long)&util_fcopy
	},
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only driver instance, shared by all util devices */
static struct hv_driver util_drv = {
	.name = "hv_utils",
	.id_table = id_table,
	.probe = util_probe,
	.remove = util_remove,
	.suspend = util_suspend,
	.resume = util_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

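/*
 * PTP device stubs: the Hyper-V PTP clock is read-only from the guest,
 * so every operation other than gettime64 is rejected with -EOPNOTSUPP.
 */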
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	return hv_get_adj_host_time(ts);
}

static struct ptp_clock_info ptp_hyperv_info = {
	.name = "hyperv",
	.enable = hv_ptp_enable,
	.adjtime = hv_ptp_adjtime,
	.adjfreq = hv_ptp_adjfreq,
	.gettime64 = hv_ptp_gettime,
	.settime64 = hv_ptp_settime,
	.owner = THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;

static int hv_timesync_init(struct hv_util_service *srv)
{
	/* TimeSync requires Hyper-V clocksource. */
	if (!hv_read_reference_counter)
		return -ENODEV;

	spin_lock_init(&host_ts.lock);

	INIT_WORK(&adj_time_work, hv_set_host_time);

	/*
	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
	 * disabled but the driver is still useful without the PTP device
	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
	 */
	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
		pr_err("cannot register PTP clock: %ld\n",
		       PTR_ERR_OR_ZERO(hv_ptp_clock));
		hv_ptp_clock = NULL;
	}

	return 0;
}

static void hv_timesync_cancel_work(void)
{
	cancel_work_sync(&adj_time_work);
}

static int hv_timesync_pre_suspend(void)
{
	hv_timesync_cancel_work();
	return 0;
}

static void hv_timesync_deinit(void)
{
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);

	hv_timesync_cancel_work();
}

static int __init init_hyperv_utils(void)
{
	pr_info("Registering HyperV Utility Driver\n");

	return vmbus_driver_register(&util_drv);
}

static void exit_hyperv_utils(void)
{
	pr_info("De-Registered HyperV Utility Driver\n");

	vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");