/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition (XP) base.
 *
 * XP provides a base from which its users can interact
 * with XPC, yet not be dependent on XPC.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include "xp.h"

/* define the XP debug device structures to be used with dev_dbg() et al */

struct device_driver xp_dbg_name = {
	.name = "xp"
};

struct device xp_dbg_subname = {
	.init_name = "",	/* set to "" */
	.driver = &xp_dbg_name
};

struct device *xp = &xp_dbg_subname;
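
/*
 * Illustrative note (not part of the original source): with the "xp"
 * debug device defined above, XP/XPC code can route its diagnostics
 * through the standard driver-model helpers, e.g.:
 *
 *	dev_dbg(xp, "registering channel %d\n", ch_number);
 *	dev_err(xp, "remote memcpy failed, ret=%d\n", ret);
 *
 * The device exists only to give dev_dbg() et al a name to prefix
 * messages with; it is not registered with the driver core here.
 */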

/* max #of partitions possible */
short xp_max_npartitions;
EXPORT_SYMBOL_GPL(xp_max_npartitions);

short xp_partition_id;
EXPORT_SYMBOL_GPL(xp_partition_id);

u8 xp_region_size;
EXPORT_SYMBOL_GPL(xp_region_size);

unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);

unsigned long (*xp_socket_pa) (unsigned long gpa);
EXPORT_SYMBOL_GPL(xp_socket_pa);

enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
				    const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);

int (*xp_cpu_to_nasid) (int cpuid);
EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);

enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
					unsigned long size);
EXPORT_SYMBOL_GPL(xp_expand_memprotect);
enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
					  unsigned long size);
EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
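
/*
 * Illustrative sketch (not part of the original source): the hooks above
 * are left NULL and are expected to be filled in by the architecture-
 * specific init path (e.g. xp_init_uv() in xp_uv.c), roughly like:
 *
 *	xp_pa = xp_pa_uv;
 *	xp_socket_pa = xp_socket_pa_uv;
 *	xp_remote_memcpy = xp_remote_memcpy_uv;
 *
 * The *_uv names are shown for illustration only; see xp_uv.c for the
 * actual assignments.
 */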

/*
 * xpc_registrations[] keeps track of the xpc_connect() calls made by the
 * kernel-level users of XPC.
 */
struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
EXPORT_SYMBOL_GPL(xpc_registrations);

/*
 * Initialize the XPC interface to NULL to indicate that XPC isn't loaded.
 */
struct xpc_interface xpc_interface = { };
EXPORT_SYMBOL_GPL(xpc_interface);

/*
 * XPC calls this when it (the XPC module) has been loaded.
 */
void
xpc_set_interface(void (*connect) (int),
		  void (*disconnect) (int),
		  enum xp_retval (*send) (short, int, u32, void *, u16),
		  enum xp_retval (*send_notify) (short, int, u32, void *, u16,
						 xpc_notify_func, void *),
		  void (*received) (short, int, void *),
		  enum xp_retval (*partid_to_nasids) (short, void *))
{
	xpc_interface.connect = connect;
	xpc_interface.disconnect = disconnect;
	xpc_interface.send = send;
	xpc_interface.send_notify = send_notify;
	xpc_interface.received = received;
	xpc_interface.partid_to_nasids = partid_to_nasids;
}
EXPORT_SYMBOL_GPL(xpc_set_interface);
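
/*
 * Illustrative sketch (not part of the original source): when the XPC
 * module loads, it is expected to hand its entry points to XP roughly
 * like this (the handler names are illustrative):
 *
 *	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
 *			  xpc_initiate_send, xpc_initiate_send_notify,
 *			  xpc_initiate_received,
 *			  xpc_initiate_partid_to_nasids);
 *
 * Until then xpc_interface remains zeroed and the wrappers below simply
 * skip the corresponding callouts.
 */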

/*
 * XPC calls this when it (the XPC module) is being unloaded.
 */
void
xpc_clear_interface(void)
{
	memset(&xpc_interface, 0, sizeof(xpc_interface));
}
EXPORT_SYMBOL_GPL(xpc_clear_interface);

/*
 * Register for automatic establishment of a channel connection whenever
 * a partition comes up.
 *
 * Arguments:
 *
 *	ch_number - channel # to register for connection.
 *	func - function to call for asynchronous notification of channel
 *	       state changes (i.e., connection, disconnection, error) and
 *	       the arrival of incoming messages.
 *	key - pointer to optional user-defined value that gets passed back
 *	      to the user on any callouts made to func.
 *	payload_size - size in bytes of the XPC message's payload area which
 *		       contains a user-defined message. The user should make
 *		       this large enough to hold their largest message.
 *	nentries - max #of XPC message entries a message queue can contain.
 *		   The actual number, which is determined when a connection
 *		   is established and may be less than requested, will be
 *		   passed to the user via the xpConnected callout.
 *	assigned_limit - max number of kthreads allowed to be processing
 *			 messages (per connection) at any given instant.
 *	idle_limit - max number of kthreads allowed to be idle at any given
 *		     instant.
 */
enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
	    u16 nentries, u32 assigned_limit, u32 idle_limit)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
	DBUG_ON(payload_size == 0 || nentries == 0);
	DBUG_ON(func == NULL);
	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);

	if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
		return xpPayloadTooBig;

	registration = &xpc_registrations[ch_number];

	if (mutex_lock_interruptible(&registration->mutex) != 0)
		return xpInterrupted;

	/* if XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func != NULL) {
		mutex_unlock(&registration->mutex);
		return xpAlreadyRegistered;
	}

	/* register the channel for connection */
	registration->entry_size = XPC_MSG_SIZE(payload_size);
	registration->nentries = nentries;
	registration->assigned_limit = assigned_limit;
	registration->idle_limit = idle_limit;
	registration->key = key;
	registration->func = func;

	mutex_unlock(&registration->mutex);

	if (xpc_interface.connect)
		xpc_interface.connect(ch_number);

	return xpSuccess;
}
EXPORT_SYMBOL_GPL(xpc_connect);
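
/*
 * Illustrative sketch (not part of the original source): a kernel-level
 * user would typically register its channel once at init time. The
 * channel number, message struct and callback below are hypothetical:
 *
 *	static void my_channel_func(enum xp_retval reason, short partid,
 *				    int ch_number, void *data, void *key)
 *	{
 *		... handle xpConnected, xpMsgReceived, disconnects, etc. ...
 *	}
 *
 *	ret = xpc_connect(MY_CH_NUMBER, my_channel_func, NULL,
 *			  sizeof(struct my_msg), 128, 4, 2);
 *	if (ret != xpSuccess)
 *		... fall back or fail the caller's init ...
 *
 * XPC then invokes my_channel_func() on channel state changes and on
 * the arrival of messages for that channel.
 */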

/*
 * Remove the registration for automatic connection of the specified channel
 * when a partition comes up.
 *
 * Before returning, xpc_disconnect() will wait until all connections on the
 * specified channel have been closed/torn down. So the caller can be assured
 * that they will not be receiving any more callouts from XPC to their
 * function registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_disconnect(int ch_number)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	registration = &xpc_registrations[ch_number];

	/*
	 * We've decided not to make this mutex_lock_interruptible(), since
	 * we figured XPC's users will just turn around and call
	 * xpc_disconnect() again anyway, so we might as well wait, if need
	 * be.
	 */
	mutex_lock(&registration->mutex);

	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func == NULL) {
		mutex_unlock(&registration->mutex);
		return;
	}

	/* remove the connection registration for the specified channel */
	registration->func = NULL;
	registration->key = NULL;
	registration->nentries = 0;
	registration->entry_size = 0;
	registration->assigned_limit = 0;
	registration->idle_limit = 0;

	if (xpc_interface.disconnect)
		xpc_interface.disconnect(ch_number);

	mutex_unlock(&registration->mutex);

	return;
}
EXPORT_SYMBOL_GPL(xpc_disconnect);
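
/*
 * Illustrative sketch (not part of the original source): the matching
 * teardown for the xpc_connect() example above is a single call from
 * the user's exit path, which blocks until the channel is torn down:
 *
 *	xpc_disconnect(MY_CH_NUMBER);
 *
 * After it returns, no further callouts are made to the function that
 * was registered for that channel.
 */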

static int __init
xp_init(void)
{
	enum xp_retval ret;
	int ch_number;

	/* initialize the connection registration mutex */
	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
		mutex_init(&xpc_registrations[ch_number].mutex);

	if (is_uv_system())
		ret = xp_init_uv();
	else
		ret = 0;

	if (ret != xpSuccess)
		return ret;

	return 0;
}

module_init(xp_init);

static void __exit
xp_exit(void)
{
	if (is_uv_system())
		xp_exit_uv();
}

module_exit(xp_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition (XP) base");
MODULE_LICENSE("GPL");