/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55
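
/*
 * Both timeouts are in seconds and are scaled by HZ at the call site.
 * A sketch of the pattern used by the util drivers (the work item name
 * is illustrative):
 *
 *	schedule_delayed_work(&host_handshake_work,
 *			      HV_UTIL_NEGO_TIMEOUT * HZ);
 */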


/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};

/* struct hv_monitor_page Layout (offsets in hex) */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0] | NextCheckTime[0][1]    | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvz3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};
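
/*
 * Signaling sketch (the shape of vmbus_setevent() for a channel that has
 * a monitor allocated; "channel" is illustrative): the guest sets its
 * monitor bit in the child->parent page and lets the hypervisor batch
 * the notification according to the page's latency settings:
 *
 *	struct hv_monitor_page *mp = vmbus_connection.monitor_pages[1];
 *
 *	sync_set_bit(channel->monitor_bit,
 *		     (unsigned long *)&mp->trigger_group
 *					[channel->monitor_grp].pending);
 */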

#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};
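
/*
 * Fill-in sketch (the shape of hv_post_message() in hv.c, simplified):
 * the structure is built in the per-cpu post_msg_page, which satisfies
 * the hypervisor's alignment requirement, and handed to the hypercall:
 *
 *	aligned_msg = hv_cpu->post_msg_page;
 *	aligned_msg->connectionid = connection_id;
 *	aligned_msg->reserved = 0;
 *	aligned_msg->message_type = message_type;
 *	aligned_msg->payload_size = payload_size;
 *	memcpy((void *)aligned_msg->payload, payload, payload_size);
 *
 *	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
 */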


enum {
	VMBUS_MESSAGE_CONNECTION_ID = 1,
	VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
	VMBUS_MESSAGE_PORT_ID = 1,
	VMBUS_EVENT_CONNECTION_ID = 2,
	VMBUS_EVENT_PORT_ID = 2,
	VMBUS_MONITOR_CONNECTION_ID = 3,
	VMBUS_MONITOR_PORT_ID = 3,
	VMBUS_MESSAGE_SINT = 2,
};
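
/*
 * Selection sketch (assuming the VERSION_WIN10_V5 handling in
 * vmbus_negotiate_version()): hosts speaking VMBus protocol 5.0 or later
 * expect messages on connection ID 4 rather than the legacy ID 1:
 *
 *	if (version >= VERSION_WIN10_V5)
 *		vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
 *	else
 *		vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
 */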

/*
 * Per-cpu state for channel handling
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;
	/*
	 * Buffer on which to post messages to the host.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles event messages on a
	 * per-CPU basis.
	 */
	struct tasklet_struct msg_dpc;

	/*
	 * To optimize the mapping of relid to channel, maintain a
	 * per-cpu list of the channels based on their CPU affinity.
	 */
	struct list_head chan_list;
};
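
/*
 * Access sketch (the usual pattern in hv.c and vmbus_drv.c): the per-cpu
 * context is reached through hv_context.cpu_context, e.g.
 *
 *	struct hv_per_cpu_context *hv_cpu
 *		= per_cpu_ptr(hv_context.cpu_context, cpu);
 *
 *	tasklet_schedule(&hv_cpu->msg_dpc);
 */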

struct hv_context {
	/* We only support running on top of Hyper-V,
	 * so at this point this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by NUMA node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE. One half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half to receive endpoint
 * interrupts.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)
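
/*
 * Worked out with the x86 value HV_HYP_PAGE_SIZE == 4096: the send half
 * is 2048 bytes, i.e. 2048 * 8 == 16384 one-bit channel slots.
 */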

/* The value here must be a multiple of 32 */
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED	256


enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

struct vmbus_connection {
	/*
	 * CPU on which the initial host contact was made.
	 */
	int connect_cpu;

	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;
	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, sets it and calls Hv to generate a
	 * port event. The other end receives the port event and parses the
	 * recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - the 1st page is for parent->child notification and the
	 * 2nd is for child->parent notification.
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * The number of primary channels that should be "fixed up"
	 * upon resume: these channels are re-offered upon resume, and some
	 * fields of the channel offers (i.e. child_relid and connection_id)
	 * can change, so the old offermsg must be fixed up, before the resume
	 * callbacks of the VSC drivers start to further touch the channels.
	 */
	atomic_t nr_chan_fixup_on_resume;
	/*
	 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
	 * drop to zero.
	 */
	struct completion ready_for_resume_event;
};


struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[0];
};
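
/*
 * Allocation sketch for structs with a trailing payload like this one
 * (the generic kernel pattern, not a helper defined in this header):
 *
 *	struct vmbus_msginfo *msginfo =
 *		kzalloc(sizeof(*msginfo) + payload_size, GFP_KERNEL);
 *
 *	memcpy(msginfo->msg, payload, payload_size);
 */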


extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

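/*
 * Tell the host that the channel identified by @relid has pending work:
 * atomically set the channel's bit in the send half of the shared
 * interrupt page; the host parses that page when the accompanying port
 * event arrives.
 */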
static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];
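
/*
 * Dispatch sketch (the shape of vmbus_on_msg_dpc(), simplified): the
 * message tasklet indexes this table by message type and either runs the
 * handler inline or defers it to a context that is allowed to sleep:
 *
 *	entry = &channel_message_table[hdr->msgtype];
 *	if (entry->handler_type == VMHT_BLOCKING)
 *		queue work on vmbus_connection.work_queue that will
 *		call entry->message_handler(hdr);
 *	else
 *		entry->message_handler(hdr);
 */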


/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

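/*
 * Run @cb(channel) on the channel's target CPU: directly when we are
 * already in interrupt context on that CPU, otherwise via a synchronous
 * IPI (smp_call_function_single() with wait == true).
 */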
static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;

	if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
		cb(channel);
		return;
	}
	smp_call_function_single(channel->target_cpu, cb, channel, true);
}

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};

#endif /* _HYPERV_VMBUS_H */