/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

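/*
 * SMI-class MADs (LID-routed and directed-route subnet management) may
 * only be passed to firmware on ports that actually expose an SMI;
 * every other management class is always allowed through.
 */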
static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->mdev->port_caps[port_num - 1].has_smi;
}

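/*
 * Thin wrapper around the firmware MAD_IFC command. Bit 0 of the
 * operation modifier suppresses the M_Key check and bit 1 the B_Key
 * check; both are forced on when no work completion is available, since
 * a key-violation trap could not be routed back to the sender then.
 */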
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
			int ignore_bkey, u8 port, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh, const void *in_mad,
			void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
				port);
}

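/*
 * Screen the class/method combination of an incoming MAD, hand accepted
 * MADs to firmware via MAD_IFC, and translate the outcome into
 * IB_MAD_RESULT_* flags for the core MAD layer.
 */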
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

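/*
 * Convert a QUERY_VPORT_COUNTER result into the PMA extended port
 * counters layout. Unicast and multicast contributions are summed, and
 * the octet counts are shifted right by two because PortXmitData and
 * PortRcvData count in units of four-byte words.
 */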
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}

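/*
 * Fill the classic 32-bit PMA PortCounters from the IB port counters
 * group of the PPCNT register.
 */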
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters are reported in their 64-bit form via
	 * ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) {		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
			     port_xmit_wait);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}

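/*
 * Answer a performance-management GET from hardware counters instead of
 * forwarding it to firmware. The counter payload of a PMA MAD begins at
 * byte 64 of the packet, i.e. at offset 40 into the data area that
 * follows the 24-byte MAD header, which is why the casts below use
 * out_mad->data + 40.
 */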
static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx5_core_dev *mdev;
	bool native_port = true;
	u8 mdev_port_num;
	void *out_cnt;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* Failed to get the native port, likely because the 2nd
		 * port is still unaffiliated. In that case, default to the
		 * 1st port and the attached PF device.
		 */
		native_port = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}
	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		goto done;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_vport_counter(mdev, 0, 0,
						    mdev_port_num, out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt) {
			err = IB_MAD_RESULT_FAILURE;
			goto done;
		}

		err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}
	kvfree(out_cnt);
	err = err ? IB_MAD_RESULT_FAILURE :
		    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
done:
	if (native_port)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

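/*
 * Top-level MAD handler. PerfMgmt GETs are answered from hardware
 * counters when the device supports per-vport counters; all other MADs
 * take the generic MAD_IFC path through process_mad().
 */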
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;
	int ret;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
	} else {
		ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				  in_mad, out_mad);
	}
	return ret;
}

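/*
 * Probe the ExtendedPortInfo attribute (a Mellanox extension) and cache
 * whether the port supports it; any command or MAD status error simply
 * leaves the capability flag cleared.
 */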
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

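/*
 * Issue a NodeInfo SMP query. The mlx5_query_mad_ifc_* helpers below
 * pull individual fields out of the returned attribute at their fixed
 * byte offsets.
 */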
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

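/*
 * The P_Key table is read 32 entries per SMP block, so fetch the block
 * containing @index and pick the requested entry out of it.
 */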
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

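/*
 * A GID is assembled from two queries: the 8-byte subnet prefix comes
 * from PortInfo and the 8-byte GUID comes from the GuidInfo block that
 * contains @index (eight GUIDs per block).
 */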
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

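/*
 * Populate ib_port_attr by parsing the raw PortInfo attribute at fixed
 * byte offsets, then refine the active speed: extended speed fields for
 * FDR/EDR/HDR, and the FDR-10 indication from ExtendedPortInfo when the
 * base attribute reports QDR.
 */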
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props is zeroed by the caller; avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
		props->port_cap_flags2 =
			be16_to_cpup((__be16 *)(out_mad->data + 60));

		if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
			props->active_width = out_mad->data[31] & 0x1f;
	}

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		case 4:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
				props->active_speed = IB_SPEED_HDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}