1/*
2 * Copyright(c) 2015 - 2018 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/pci.h>
49#include <linux/netdevice.h>
50#include <linux/vmalloc.h>
51#include <linux/delay.h>
52#include <linux/xarray.h>
53#include <linux/module.h>
54#include <linux/printk.h>
55#include <linux/hrtimer.h>
56#include <linux/bitmap.h>
57#include <linux/numa.h>
58#include <rdma/rdma_vt.h>
59
60#include "hfi.h"
61#include "device.h"
62#include "common.h"
63#include "trace.h"
64#include "mad.h"
65#include "sdma.h"
66#include "debugfs.h"
67#include "verbs.h"
68#include "aspm.h"
69#include "affinity.h"
70#include "vnic.h"
71#include "exp_rcv.h"
72
73#undef pr_fmt
74#define pr_fmt(fmt) DRIVER_NAME ": " fmt
75
76/*
77 * min buffers we want to have per context, after driver
78 */
79#define HFI1_MIN_USER_CTXT_BUFCNT 7
80
81#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
82#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
83#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
84#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
85
86#define NUM_IB_PORTS 1
87
88/*
89 * Number of user receive contexts we are configured to use (to allow for more
90 * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
91 */
92int num_user_contexts = -1;
93module_param_named(num_user_contexts, num_user_contexts, int, 0444);
94MODULE_PARM_DESC(
95 num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
96
97uint krcvqs[RXE_NUM_DATA_VL];
98int krcvqsset;
99module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
100MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
101
102/* computed based on above array */
103unsigned long n_krcvqs;
104
105static unsigned hfi1_rcvarr_split = 25;
106module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
107MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
108
109static uint eager_buffer_size = (8 << 20); /* 8MB */
110module_param(eager_buffer_size, uint, S_IRUGO);
111MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");
112
113static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
114module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
115MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
116
117static uint hfi1_hdrq_entsize = 32;
118module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
119MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
120
121unsigned int user_credit_return_threshold = 33; /* default is 33% */
122module_param(user_credit_return_threshold, uint, S_IRUGO);
123MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
124
125static inline u64 encode_rcv_header_entry_size(u16 size);
126
127DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
128
129static int hfi1_create_kctxt(struct hfi1_devdata *dd,
130 struct hfi1_pportdata *ppd)
131{
132 struct hfi1_ctxtdata *rcd;
133 int ret;
134
135 /* Control context has to be always 0 */
136 BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
137
138 ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
139 if (ret < 0) {
140 dd_dev_err(dd, "Kernel receive context allocation failed\n");
141 return ret;
142 }
143
144 /*
145 * Set up the kernel context flags here and now because they use
146 * default values for all receive side memories. User contexts will
147 * be handled as they are created.
148 */
149 rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
150 HFI1_CAP_KGET(NODROP_RHQ_FULL) |
151 HFI1_CAP_KGET(NODROP_EGR_FULL) |
152 HFI1_CAP_KGET(DMA_RTAIL);
153
154 /* Control context must use DMA_RTAIL */
155 if (rcd->ctxt == HFI1_CTRL_CTXT)
156 rcd->flags |= HFI1_CAP_DMA_RTAIL;
157 rcd->seq_cnt = 1;
158
159 rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
160 if (!rcd->sc) {
161 dd_dev_err(dd, "Kernel send context allocation failed\n");
162 return -ENOMEM;
163 }
164 hfi1_init_ctxt(rcd->sc);
165
166 return 0;
167}
168
169/*
170 * Create the receive context array and one or more kernel contexts
171 */
172int hfi1_create_kctxts(struct hfi1_devdata *dd)
173{
174 u16 i;
175 int ret;
176
177 dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
178 GFP_KERNEL, dd->node);
179 if (!dd->rcd)
180 return -ENOMEM;
181
182 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
183 ret = hfi1_create_kctxt(dd, dd->pport);
184 if (ret)
185 goto bail;
186 }
187
188 return 0;
189bail:
190 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
191 hfi1_free_ctxt(dd->rcd[i]);
192
193 /* All the contexts should be freed, free the array */
194 kfree(dd->rcd);
195 dd->rcd = NULL;
196 return ret;
197}
198
199/*
200 * Helper routines for the receive context reference count (rcd and uctxt).
201 */
202static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
203{
204 kref_init(&rcd->kref);
205}
206
207/**
208 * hfi1_rcd_free - When reference is zero clean up.
209 * @kref: pointer to an initialized rcd data structure
210 *
211 */
212static void hfi1_rcd_free(struct kref *kref)
213{
214 unsigned long flags;
215 struct hfi1_ctxtdata *rcd =
216 container_of(kref, struct hfi1_ctxtdata, kref);
217
218 spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
219 rcd->dd->rcd[rcd->ctxt] = NULL;
220 spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
221
222 hfi1_free_ctxtdata(rcd->dd, rcd);
223
224 kfree(rcd);
225}
226
227/**
228 * hfi1_rcd_put - decrement reference for rcd
229 * @rcd: pointer to an initialized rcd data structure
230 *
231 * Use this to put a reference after the init.
232 */
233int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
234{
235 if (rcd)
236 return kref_put(&rcd->kref, hfi1_rcd_free);
237
238 return 0;
239}
240
241/**
242 * hfi1_rcd_get - increment reference for rcd
243 * @rcd: pointer to an initialized rcd data structure
244 *
245 * Use this to get a reference after the init.
246 *
247 * Return: reflects kref_get_unless_zero(), which returns non-zero on
248 * increment, otherwise 0.
249 */
250int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
251{
252 return kref_get_unless_zero(&rcd->kref);
253}
254
255/**
256 * allocate_rcd_index - allocate an rcd index from the rcd array
257 * @dd: pointer to a valid devdata structure
258 * @rcd: rcd data structure to assign
259 * @index: pointer to index that is allocated
260 *
261 * Find an empty index in the rcd array, and assign the given rcd to it.
262 * If the array is full, we are EBUSY.
263 *
264 */
265static int allocate_rcd_index(struct hfi1_devdata *dd,
266 struct hfi1_ctxtdata *rcd, u16 *index)
267{
268 unsigned long flags;
269 u16 ctxt;
270
271 spin_lock_irqsave(&dd->uctxt_lock, flags);
272 for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
273 if (!dd->rcd[ctxt])
274 break;
275
276 if (ctxt < dd->num_rcv_contexts) {
277 rcd->ctxt = ctxt;
278 dd->rcd[ctxt] = rcd;
279 hfi1_rcd_init(rcd);
280 }
281 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
282
283 if (ctxt >= dd->num_rcv_contexts)
284 return -EBUSY;
285
286 *index = ctxt;
287
288 return 0;
289}
290
291/**
292 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
293 * array
294 * @dd: pointer to a valid devdata structure
295 * @ctxt: the index of a possible rcd
296 *
297 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
298 * ctxt index is valid.
299 *
300 * The caller is responsible for making the _put().
301 *
302 */
303struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
304 u16 ctxt)
305{
306 if (ctxt < dd->num_rcv_contexts)
307 return hfi1_rcd_get_by_index(dd, ctxt);
308
309 return NULL;
310}
311
312/**
313 * hfi1_rcd_get_by_index
314 * @dd: pointer to a valid devdata structure
315 * @ctxt: the index of a possible rcd
316 *
317 * We need to protect access to the rcd array. If access is needed to
318 * one or more index, get the protecting spinlock and then increment the
319 * kref.
320 *
321 * The caller is responsible for making the _put().
322 *
323 */
324struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
325{
326 unsigned long flags;
327 struct hfi1_ctxtdata *rcd = NULL;
328
329 spin_lock_irqsave(&dd->uctxt_lock, flags);
330 if (dd->rcd[ctxt]) {
331 rcd = dd->rcd[ctxt];
332 if (!hfi1_rcd_get(rcd))
333 rcd = NULL;
334 }
335 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
336
337 return rcd;
338}
339
340/*
341 * Common code for user and kernel context create and setup.
342 * NOTE: the initial kref is done here (hfi1_rcd_init()).
343 */
344int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
345 struct hfi1_ctxtdata **context)
346{
347 struct hfi1_devdata *dd = ppd->dd;
348 struct hfi1_ctxtdata *rcd;
349 unsigned kctxt_ngroups = 0;
350 u32 base;
351
352 if (dd->rcv_entries.nctxt_extra >
353 dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
354 kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
355 (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
356 rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
357 if (rcd) {
358 u32 rcvtids, max_entries;
359 u16 ctxt;
360 int ret;
361
362 ret = allocate_rcd_index(dd, rcd, &ctxt);
363 if (ret) {
364 *context = NULL;
365 kfree(rcd);
366 return ret;
367 }
368
369 INIT_LIST_HEAD(&rcd->qp_wait_list);
370 hfi1_exp_tid_group_init(rcd);
371 rcd->ppd = ppd;
372 rcd->dd = dd;
373 rcd->numa_id = numa;
374 rcd->rcv_array_groups = dd->rcv_entries.ngroups;
375 rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
376
377 mutex_init(&rcd->exp_mutex);
378 spin_lock_init(&rcd->exp_lock);
379 INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
380 INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);
381
382 hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
383
384 /*
385 * Calculate the context's RcvArray entry starting point.
386 * We do this here because we have to take into account all
387 * the RcvArray entries that previous contexts would have
388 * taken and we have to account for any extra groups assigned
389 * to the static (kernel) or dynamic (vnic/user) contexts.
390 */
391 if (ctxt < dd->first_dyn_alloc_ctxt) {
392 if (ctxt < kctxt_ngroups) {
393 base = ctxt * (dd->rcv_entries.ngroups + 1);
394 rcd->rcv_array_groups++;
395 } else {
396 base = kctxt_ngroups +
397 (ctxt * dd->rcv_entries.ngroups);
398 }
399 } else {
400 u16 ct = ctxt - dd->first_dyn_alloc_ctxt;
401
402 base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
403 kctxt_ngroups);
404 if (ct < dd->rcv_entries.nctxt_extra) {
405 base += ct * (dd->rcv_entries.ngroups + 1);
406 rcd->rcv_array_groups++;
407 } else {
408 base += dd->rcv_entries.nctxt_extra +
409 (ct * dd->rcv_entries.ngroups);
410 }
411 }
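 /* base is counted in RcvArray groups; scale by group_size to get the absolute entry index */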
412 rcd->eager_base = base * dd->rcv_entries.group_size;
413
414 rcd->rcvhdrq_cnt = rcvhdrcnt;
415 rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
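 /* rhf_offset: dword offset of the 64-bit RHF within each rcvhdrq entry (entry size minus 2 dwords) */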
416 rcd->rhf_offset =
417 rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
418 /*
419 * Simple Eager buffer allocation: we have already pre-allocated
420 * the number of RcvArray entry groups. Each ctxtdata structure
421 * holds the number of groups for that context.
422 *
423 * To follow CSR requirements and maintain cacheline alignment,
424 * make sure all sizes and bases are multiples of group_size.
425 *
426 * The expected entry count is what is left after assigning
427 * eager.
428 */
429 max_entries = rcd->rcv_array_groups *
430 dd->rcv_entries.group_size;
431 rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
432 rcd->egrbufs.count = round_down(rcvtids,
433 dd->rcv_entries.group_size);
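 /* illustrative: a 25% rcvarr_split over 2048 usable entries reserves 512 eager entries before rounding down to a group_size multiple */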
434 if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
435 dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
436 rcd->ctxt);
437 rcd->egrbufs.count = MAX_EAGER_ENTRIES;
438 }
439 hfi1_cdbg(PROC,
440 "ctxt%u: max Eager buffer RcvArray entries: %u\n",
441 rcd->ctxt, rcd->egrbufs.count);
442
443 /*
444 * Allocate array that will hold the eager buffer accounting
445 * data.
446 * This will allocate the maximum possible buffer count based
447 * on the value of the RcvArray split parameter.
448 * The resulting value will be rounded down to the closest
449 * multiple of dd->rcv_entries.group_size.
450 */
451 rcd->egrbufs.buffers =
452 kcalloc_node(rcd->egrbufs.count,
453 sizeof(*rcd->egrbufs.buffers),
454 GFP_KERNEL, numa);
455 if (!rcd->egrbufs.buffers)
456 goto bail;
457 rcd->egrbufs.rcvtids =
458 kcalloc_node(rcd->egrbufs.count,
459 sizeof(*rcd->egrbufs.rcvtids),
460 GFP_KERNEL, numa);
461 if (!rcd->egrbufs.rcvtids)
462 goto bail;
463 rcd->egrbufs.size = eager_buffer_size;
464 /*
465 * The size of the buffers programmed into the RcvArray
466 * entries needs to be big enough to handle the highest
467 * MTU supported.
468 */
469 if (rcd->egrbufs.size < hfi1_max_mtu) {
470 rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
471 hfi1_cdbg(PROC,
472 "ctxt%u: eager bufs size too small. Adjusting to %u\n",
473 rcd->ctxt, rcd->egrbufs.size);
474 }
475 rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
476
477 /* Applicable only for statically created kernel contexts */
478 if (ctxt < dd->first_dyn_alloc_ctxt) {
479 rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
480 GFP_KERNEL, numa);
481 if (!rcd->opstats)
482 goto bail;
483
484 /* Initialize TID flow generations for the context */
485 hfi1_kern_init_ctxt_generations(rcd);
486 }
487
488 *context = rcd;
489 return 0;
490 }
491
492bail:
493 *context = NULL;
494 hfi1_free_ctxt(rcd);
495 return -ENOMEM;
496}
497
498/**
499 * hfi1_free_ctxt
500 * @rcd: pointer to an initialized rcd data structure
501 *
502 * This wrapper is the free function that matches hfi1_create_ctxtdata().
503 * When a context is done being used (kernel or user), this function is called
504 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
505 * Other users of the context do a get/put sequence to make sure that the
506 * structure isn't removed while in use.
507 */
508void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
509{
510 hfi1_rcd_put(rcd);
511}
512
513/*
514 * Convert a receive header entry size to the encoding used in the CSR.
515 *
516 * Return a zero if the given size is invalid.
517 */
518static inline u64 encode_rcv_header_entry_size(u16 size)
519{
520 /* there are only 3 valid receive header entry sizes */
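 /* size is in dwords: 2 (8B) -> 1, 16 (64B) -> 2, 32 (128B) -> 4 */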
521 if (size == 2)
522 return 1;
523 if (size == 16)
524 return 2;
525 else if (size == 32)
526 return 4;
527 return 0; /* invalid */
528}
529
530/*
531 * Select the largest ccti value over all SLs to determine the intra-
532 * packet gap for the link.
533 *
534 * called with cca_timer_lock held (to protect access to cca_timer
535 * array), and rcu_read_lock() (to protect access to cc_state).
536 */
537void set_link_ipg(struct hfi1_pportdata *ppd)
538{
539 struct hfi1_devdata *dd = ppd->dd;
540 struct cc_state *cc_state;
541 int i;
542 u16 cce, ccti_limit, max_ccti = 0;
543 u16 shift, mult;
544 u64 src;
545 u32 current_egress_rate; /* Mbits /sec */
546 u32 max_pkt_time;
547 /*
548 * max_pkt_time is the maximum packet egress time in units
549 * of the fabric clock period 1/(805 MHz).
550 */
551
552 cc_state = get_cc_state(ppd);
553
554 if (!cc_state)
555 /*
556 * This should _never_ happen - rcu_read_lock() is held,
557 * and set_link_ipg() should not be called if cc_state
558 * is NULL.
559 */
560 return;
561
562 for (i = 0; i < OPA_MAX_SLS; i++) {
563 u16 ccti = ppd->cca_timer[i].ccti;
564
565 if (ccti > max_ccti)
566 max_ccti = ccti;
567 }
568
569 ccti_limit = cc_state->cct.ccti_limit;
570 if (max_ccti > ccti_limit)
571 max_ccti = ccti_limit;
572
573 cce = cc_state->cct.entries[max_ccti].entry;
574 shift = (cce & 0xc000) >> 14;
575 mult = (cce & 0x3fff);
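 /* CCT entry: bits 15:14 carry the shift, bits 13:0 the multiplier used below */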
576
577 current_egress_rate = active_egress_rate(ppd);
578
579 max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
580
581 src = (max_pkt_time >> shift) * mult;
582
583 src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
584 src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
585
586 write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
587}
588
589static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
590{
591 struct cca_timer *cca_timer;
592 struct hfi1_pportdata *ppd;
593 int sl;
594 u16 ccti_timer, ccti_min;
595 struct cc_state *cc_state;
596 unsigned long flags;
597 enum hrtimer_restart ret = HRTIMER_NORESTART;
598
599 cca_timer = container_of(t, struct cca_timer, hrtimer);
600 ppd = cca_timer->ppd;
601 sl = cca_timer->sl;
602
603 rcu_read_lock();
604
605 cc_state = get_cc_state(ppd);
606
607 if (!cc_state) {
608 rcu_read_unlock();
609 return HRTIMER_NORESTART;
610 }
611
612 /*
613 * 1) decrement ccti for SL
614 * 2) calculate IPG for link (set_link_ipg())
615 * 3) restart timer, unless ccti is at min value
616 */
617
618 ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
619 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
620
621 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
622
623 if (cca_timer->ccti > ccti_min) {
624 cca_timer->ccti--;
625 set_link_ipg(ppd);
626 }
627
628 if (cca_timer->ccti > ccti_min) {
629 unsigned long nsec = 1024 * ccti_timer;
630 /* ccti_timer is in units of 1.024 usec */
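 /* illustrative: ccti_timer = 100 re-arms the timer 102,400 ns (~102 us) from now */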
631 hrtimer_forward_now(t, ns_to_ktime(nsec));
632 ret = HRTIMER_RESTART;
633 }
634
635 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
636 rcu_read_unlock();
637 return ret;
638}
639
640/*
641 * Common code for initializing the physical port structure.
642 */
643void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
644 struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
645{
646 int i;
647 uint default_pkey_idx;
648 struct cc_state *cc_state;
649
650 ppd->dd = dd;
651 ppd->hw_pidx = hw_pidx;
652 ppd->port = port; /* IB port number, not index */
653 ppd->prev_link_width = LINK_WIDTH_DEFAULT;
654 /*
655 * There are C_VL_COUNT number of PortVLXmitWait counters.
656 * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
657 */
658 for (i = 0; i < C_VL_COUNT + 1; i++) {
659 ppd->port_vl_xmit_wait_last[i] = 0;
660 ppd->vl_xmit_flit_cnt[i] = 0;
661 }
662
663 default_pkey_idx = 1;
664
665 ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
666 ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
667
668 if (loopback) {
669 dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
670 !default_pkey_idx);
671 ppd->pkeys[!default_pkey_idx] = 0x8001;
672 }
673
674 INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
675 INIT_WORK(&ppd->link_up_work, handle_link_up);
676 INIT_WORK(&ppd->link_down_work, handle_link_down);
677 INIT_WORK(&ppd->freeze_work, handle_freeze);
678 INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
679 INIT_WORK(&ppd->sma_message_work, handle_sma_message);
680 INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
681 INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
682 INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
683 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
684
685 mutex_init(&ppd->hls_lock);
686 spin_lock_init(&ppd->qsfp_info.qsfp_lock);
687
688 ppd->qsfp_info.ppd = ppd;
689 ppd->sm_trap_qp = 0x0;
690 ppd->sa_qp = 0x1;
691
692 ppd->hfi1_wq = NULL;
693
694 spin_lock_init(&ppd->cca_timer_lock);
695
696 for (i = 0; i < OPA_MAX_SLS; i++) {
697 hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
698 HRTIMER_MODE_REL);
699 ppd->cca_timer[i].ppd = ppd;
700 ppd->cca_timer[i].sl = i;
701 ppd->cca_timer[i].ccti = 0;
702 ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
703 }
704
705 ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
706
707 spin_lock_init(&ppd->cc_state_lock);
708 spin_lock_init(&ppd->cc_log_lock);
709 cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
710 RCU_INIT_POINTER(ppd->cc_state, cc_state);
711 if (!cc_state)
712 goto bail;
713 return;
714
715bail:
716 dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
717}
718
719/*
720 * Do initialization for device that is only needed on
721 * first detect, not on resets.
722 */
723static int loadtime_init(struct hfi1_devdata *dd)
724{
725 return 0;
726}
727
728/**
729 * init_after_reset - re-initialize after a reset
730 * @dd: the hfi1_ib device
731 *
732 * sanity check at least some of the values after reset, and
733 * ensure no receive or transmit is occurring (explicitly, in case the reset
734 * failed)
735 */
736static int init_after_reset(struct hfi1_devdata *dd)
737{
738 int i;
739 struct hfi1_ctxtdata *rcd;
740 /*
741 * Ensure chip does no sends or receives, tail updates, or
742 * pioavail updates while we re-initialize. This is mostly
743 * for the driver data structures, not chip registers.
744 */
745 for (i = 0; i < dd->num_rcv_contexts; i++) {
746 rcd = hfi1_rcd_get_by_index(dd, i);
747 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
748 HFI1_RCVCTRL_INTRAVAIL_DIS |
749 HFI1_RCVCTRL_TAILUPD_DIS, rcd);
750 hfi1_rcd_put(rcd);
751 }
752 pio_send_control(dd, PSC_GLOBAL_DISABLE);
753 for (i = 0; i < dd->num_send_contexts; i++)
754 sc_disable(dd->send_contexts[i].sc);
755
756 return 0;
757}
758
759static void enable_chip(struct hfi1_devdata *dd)
760{
761 struct hfi1_ctxtdata *rcd;
762 u32 rcvmask;
763 u16 i;
764
765 /* enable PIO send */
766 pio_send_control(dd, PSC_GLOBAL_ENABLE);
767
768 /*
769 * Enable kernel ctxts' receive and receive interrupt.
770 * Other ctxts done as user opens and initializes them.
771 */
772 for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
773 rcd = hfi1_rcd_get_by_index(dd, i);
774 if (!rcd)
775 continue;
776 rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
777 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
778 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
779 if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
780 rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
781 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
782 rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
783 if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
784 rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
785 if (HFI1_CAP_IS_KSET(TID_RDMA))
786 rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
787 hfi1_rcvctrl(dd, rcvmask, rcd);
788 sc_enable(rcd->sc);
789 hfi1_rcd_put(rcd);
790 }
791}
792
793/**
794 * create_workqueues - create per port workqueues
795 * @dd: the hfi1_ib device
796 */
797static int create_workqueues(struct hfi1_devdata *dd)
798{
799 int pidx;
800 struct hfi1_pportdata *ppd;
801
802 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
803 ppd = dd->pport + pidx;
804 if (!ppd->hfi1_wq) {
805 ppd->hfi1_wq =
806 alloc_workqueue(
807 "hfi%d_%d",
808 WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
809 WQ_MEM_RECLAIM,
810 HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
811 dd->unit, pidx);
812 if (!ppd->hfi1_wq)
813 goto wq_error;
814 }
815 if (!ppd->link_wq) {
816 /*
817 * Make the link workqueue single-threaded to enforce
818 * serialization.
819 */
820 ppd->link_wq =
821 alloc_workqueue(
822 "hfi_link_%d_%d",
823 WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
824 1, /* max_active */
825 dd->unit, pidx);
826 if (!ppd->link_wq)
827 goto wq_error;
828 }
829 }
830 return 0;
831wq_error:
832 pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
833 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
834 ppd = dd->pport + pidx;
835 if (ppd->hfi1_wq) {
836 destroy_workqueue(ppd->hfi1_wq);
837 ppd->hfi1_wq = NULL;
838 }
839 if (ppd->link_wq) {
840 destroy_workqueue(ppd->link_wq);
841 ppd->link_wq = NULL;
842 }
843 }
844 return -ENOMEM;
845}
846
847/**
848 * enable_general_intr() - Enable the IRQs that will be handled by the
849 * general interrupt handler.
850 * @dd: valid devdata
851 *
852 */
853static void enable_general_intr(struct hfi1_devdata *dd)
854{
855 set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
856 set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
857 set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
858 set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
859 set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
860 set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
861 set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
862}
863
864/**
865 * hfi1_init - do the actual initialization sequence on the chip
866 * @dd: the hfi1_ib device
867 * @reinit: re-initializing, so don't allocate new memory
868 *
869 * Do the actual initialization sequence on the chip. This is done
870 * both from the init routine called from the PCI infrastructure, and
871 * when we reset the chip, or detect that it was reset internally,
872 * or it's administratively re-enabled.
873 *
874 * Memory allocation here and in called routines is only done in
875 * the first case (reinit == 0). We have to be careful, because even
876 * without memory allocation, we need to re-write all the chip registers
877 * TIDs, etc. after the reset or enable has completed.
878 */
879int hfi1_init(struct hfi1_devdata *dd, int reinit)
880{
881 int ret = 0, pidx, lastfail = 0;
882 unsigned long len;
883 u16 i;
884 struct hfi1_ctxtdata *rcd;
885 struct hfi1_pportdata *ppd;
886
887 /* Set up send low level handlers */
888 dd->process_pio_send = hfi1_verbs_send_pio;
889 dd->process_dma_send = hfi1_verbs_send_dma;
890 dd->pio_inline_send = pio_copy;
891 dd->process_vnic_dma_send = hfi1_vnic_send_dma;
892
893 if (is_ax(dd)) {
894 atomic_set(&dd->drop_packet, DROP_PACKET_ON);
895 dd->do_drop = 1;
896 } else {
897 atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
898 dd->do_drop = 0;
899 }
900
901 /* make sure the link is not "up" */
902 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
903 ppd = dd->pport + pidx;
904 ppd->linkup = 0;
905 }
906
907 if (reinit)
908 ret = init_after_reset(dd);
909 else
910 ret = loadtime_init(dd);
911 if (ret)
912 goto done;
913
914 /* allocate dummy tail memory for all receive contexts */
915 dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
916 sizeof(u64),
917 &dd->rcvhdrtail_dummy_dma,
918 GFP_KERNEL);
919
920 if (!dd->rcvhdrtail_dummy_kvaddr) {
921 dd_dev_err(dd, "cannot allocate dummy tail memory\n");
922 ret = -ENOMEM;
923 goto done;
924 }
925
926 /* dd->rcd can be NULL if early initialization failed */
927 for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
928 /*
929 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
930 * re-init, the simplest way to handle this is to free
931 * existing, and re-allocate.
932 * Need to re-create rest of ctxt 0 ctxtdata as well.
933 */
934 rcd = hfi1_rcd_get_by_index(dd, i);
935 if (!rcd)
936 continue;
937
938 rcd->do_interrupt = &handle_receive_interrupt;
939
940 lastfail = hfi1_create_rcvhdrq(dd, rcd);
941 if (!lastfail)
942 lastfail = hfi1_setup_eagerbufs(rcd);
943 if (!lastfail)
944 lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
945 if (lastfail) {
946 dd_dev_err(dd,
947 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
948 ret = lastfail;
949 }
950 /* enable IRQ */
951 hfi1_rcd_put(rcd);
952 }
953
954 /* Allocate enough memory for user event notification. */
955 len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
956 sizeof(*dd->events));
957 dd->events = vmalloc_user(len);
958 if (!dd->events)
959 dd_dev_err(dd, "Failed to allocate user events page\n");
960 /*
961 * Allocate a page for device and port status.
962 * Page will be shared amongst all user processes.
963 */
964 dd->status = vmalloc_user(PAGE_SIZE);
965 if (!dd->status)
966 dd_dev_err(dd, "Failed to allocate dev status page\n");
967 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
968 ppd = dd->pport + pidx;
969 if (dd->status)
970 /* Currently, we only have one port */
971 ppd->statusp = &dd->status->port;
972
973 set_mtu(ppd);
974 }
975
976 /* enable chip even if we have an error, so we can debug cause */
977 enable_chip(dd);
978
979done:
980 /*
981 * Set status even if port serdes is not initialized
982 * so that diags will work.
983 */
984 if (dd->status)
985 dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
986 HFI1_STATUS_INITTED;
987 if (!ret) {
988 /* enable all interrupts from the chip */
989 enable_general_intr(dd);
990 init_qsfp_int(dd);
991
992 /* chip is OK for user apps; mark it as initialized */
993 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
994 ppd = dd->pport + pidx;
995
996 /*
997 * start the serdes - must be after interrupts are
998 * enabled so we are notified when the link goes up
999 */
1000 lastfail = bringup_serdes(ppd);
1001 if (lastfail)
1002 dd_dev_info(dd,
1003 "Failed to bring up port %u\n",
1004 ppd->port);
1005
1006 /*
1007 * Set status even if port serdes is not initialized
1008 * so that diags will work.
1009 */
1010 if (ppd->statusp)
1011 *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
1012 HFI1_STATUS_INITTED;
1013 if (!ppd->link_speed_enabled)
1014 continue;
1015 }
1016 }
1017
1018 /* if ret is non-zero, we probably should do some cleanup here... */
1019 return ret;
1020}
1021
1022struct hfi1_devdata *hfi1_lookup(int unit)
1023{
1024 return xa_load(&hfi1_dev_table, unit);
1025}
1026
1027/*
1028 * Stop the timers during unit shutdown, or after an error late
1029 * in initialization.
1030 */
1031static void stop_timers(struct hfi1_devdata *dd)
1032{
1033 struct hfi1_pportdata *ppd;
1034 int pidx;
1035
1036 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1037 ppd = dd->pport + pidx;
1038 if (ppd->led_override_timer.function) {
1039 del_timer_sync(&ppd->led_override_timer);
1040 atomic_set(&ppd->led_override_timer_active, 0);
1041 }
1042 }
1043}
1044
1045/**
1046 * shutdown_device - shut down a device
1047 * @dd: the hfi1_ib device
1048 *
1049 * This is called to make the device quiet when we are about to
1050 * unload the driver, and also when the device is administratively
1051 * disabled. It does not free any data structures.
1052 * Everything it does has to be setup again by hfi1_init(dd, 1)
1053 */
1054static void shutdown_device(struct hfi1_devdata *dd)
1055{
1056 struct hfi1_pportdata *ppd;
1057 struct hfi1_ctxtdata *rcd;
1058 unsigned pidx;
1059 int i;
1060
1061 if (dd->flags & HFI1_SHUTDOWN)
1062 return;
1063 dd->flags |= HFI1_SHUTDOWN;
1064
1065 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1066 ppd = dd->pport + pidx;
1067
1068 ppd->linkup = 0;
1069 if (ppd->statusp)
1070 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
1071 HFI1_STATUS_IB_READY);
1072 }
1073 dd->flags &= ~HFI1_INITTED;
1074
1075 /* mask and clean up interrupts */
1076 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
1077 msix_clean_up_interrupts(dd);
1078
1079 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1080 ppd = dd->pport + pidx;
1081 for (i = 0; i < dd->num_rcv_contexts; i++) {
1082 rcd = hfi1_rcd_get_by_index(dd, i);
1083 hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
1084 HFI1_RCVCTRL_CTXT_DIS |
1085 HFI1_RCVCTRL_INTRAVAIL_DIS |
1086 HFI1_RCVCTRL_PKEY_DIS |
1087 HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
1088 hfi1_rcd_put(rcd);
1089 }
1090 /*
1091 * Gracefully stop all sends allowing any in progress to
1092 * trickle out first.
1093 */
1094 for (i = 0; i < dd->num_send_contexts; i++)
1095 sc_flush(dd->send_contexts[i].sc);
1096 }
1097
1098 /*
1099 * Enough for anything that's going to trickle out to have actually
1100 * done so.
1101 */
1102 udelay(20);
1103
1104 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1105 ppd = dd->pport + pidx;
1106
1107 /* disable all contexts */
1108 for (i = 0; i < dd->num_send_contexts; i++)
1109 sc_disable(dd->send_contexts[i].sc);
1110 /* disable the send device */
1111 pio_send_control(dd, PSC_GLOBAL_DISABLE);
1112
1113 shutdown_led_override(ppd);
1114
1115 /*
1116 * Clear SerdesEnable.
1117 * We can't count on interrupts since we are stopping.
1118 */
1119 hfi1_quiet_serdes(ppd);
1120
1121 if (ppd->hfi1_wq) {
1122 destroy_workqueue(ppd->hfi1_wq);
1123 ppd->hfi1_wq = NULL;
1124 }
1125 if (ppd->link_wq) {
1126 destroy_workqueue(ppd->link_wq);
1127 ppd->link_wq = NULL;
1128 }
1129 }
1130 sdma_exit(dd);
1131}
1132
1133/**
1134 * hfi1_free_ctxtdata - free a context's allocated data
1135 * @dd: the hfi1_ib device
1136 * @rcd: the ctxtdata structure
1137 *
1138 * free up any allocated data for a context
1139 * It should never change any chip state, or global driver state.
1140 */
1141void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1142{
1143 u32 e;
1144
1145 if (!rcd)
1146 return;
1147
1148 if (rcd->rcvhdrq) {
1149 dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
1150 rcd->rcvhdrq, rcd->rcvhdrq_dma);
1151 rcd->rcvhdrq = NULL;
1152 if (rcd->rcvhdrtail_kvaddr) {
1153 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1154 (void *)rcd->rcvhdrtail_kvaddr,
1155 rcd->rcvhdrqtailaddr_dma);
1156 rcd->rcvhdrtail_kvaddr = NULL;
1157 }
1158 }
1159
1160 /* all the RcvArray entries should have been cleared by now */
1161 kfree(rcd->egrbufs.rcvtids);
1162 rcd->egrbufs.rcvtids = NULL;
1163
1164 for (e = 0; e < rcd->egrbufs.alloced; e++) {
1165 if (rcd->egrbufs.buffers[e].dma)
1166 dma_free_coherent(&dd->pcidev->dev,
1167 rcd->egrbufs.buffers[e].len,
1168 rcd->egrbufs.buffers[e].addr,
1169 rcd->egrbufs.buffers[e].dma);
1170 }
1171 kfree(rcd->egrbufs.buffers);
1172 rcd->egrbufs.alloced = 0;
1173 rcd->egrbufs.buffers = NULL;
1174
1175 sc_free(rcd->sc);
1176 rcd->sc = NULL;
1177
1178 vfree(rcd->subctxt_uregbase);
1179 vfree(rcd->subctxt_rcvegrbuf);
1180 vfree(rcd->subctxt_rcvhdr_base);
1181 kfree(rcd->opstats);
1182
1183 rcd->subctxt_uregbase = NULL;
1184 rcd->subctxt_rcvegrbuf = NULL;
1185 rcd->subctxt_rcvhdr_base = NULL;
1186 rcd->opstats = NULL;
1187}
1188
1189/*
1190 * Release our hold on the shared asic data. If we are the last one,
1191 * return the structure to be finalized outside the lock. Must be
1192 * holding hfi1_dev_table lock.
1193 */
1194static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
1195{
1196 struct hfi1_asic_data *ad;
1197 int other;
1198
1199 if (!dd->asic_data)
1200 return NULL;
1201 dd->asic_data->dds[dd->hfi1_id] = NULL;
1202 other = dd->hfi1_id ? 0 : 1;
1203 ad = dd->asic_data;
1204 dd->asic_data = NULL;
1205 /* return NULL if the other dd still has a link */
1206 return ad->dds[other] ? NULL : ad;
1207}
1208
1209static void finalize_asic_data(struct hfi1_devdata *dd,
1210 struct hfi1_asic_data *ad)
1211{
1212 clean_up_i2c(dd, ad);
1213 kfree(ad);
1214}
1215
1216/**
1217 * hfi1_clean_devdata - cleans up per-unit data structure
1218 * @dd: pointer to a valid devdata structure
1219 *
1220 * It cleans up all data structures set up by
1221 * by hfi1_alloc_devdata().
1222 */
1223static void hfi1_clean_devdata(struct hfi1_devdata *dd)
1224{
1225 struct hfi1_asic_data *ad;
1226 unsigned long flags;
1227
1228 xa_lock_irqsave(&hfi1_dev_table, flags);
1229 __xa_erase(&hfi1_dev_table, dd->unit);
1230 ad = release_asic_data(dd);
1231 xa_unlock_irqrestore(&hfi1_dev_table, flags);
1232
1233 finalize_asic_data(dd, ad);
1234 free_platform_config(dd);
1235 rcu_barrier(); /* wait for rcu callbacks to complete */
1236 free_percpu(dd->int_counter);
1237 free_percpu(dd->rcv_limit);
1238 free_percpu(dd->send_schedule);
1239 free_percpu(dd->tx_opstats);
1240 dd->int_counter = NULL;
1241 dd->rcv_limit = NULL;
1242 dd->send_schedule = NULL;
1243 dd->tx_opstats = NULL;
1244 kfree(dd->comp_vect);
1245 dd->comp_vect = NULL;
1246 sdma_clean(dd, dd->num_sdma);
1247 rvt_dealloc_device(&dd->verbs_dev.rdi);
1248}
1249
1250static void __hfi1_free_devdata(struct kobject *kobj)
1251{
1252 struct hfi1_devdata *dd =
1253 container_of(kobj, struct hfi1_devdata, kobj);
1254
1255 hfi1_clean_devdata(dd);
1256}
1257
1258static struct kobj_type hfi1_devdata_type = {
1259 .release = __hfi1_free_devdata,
1260};
1261
1262void hfi1_free_devdata(struct hfi1_devdata *dd)
1263{
1264 kobject_put(&dd->kobj);
1265}
1266
1267/**
1268 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
1269 * @pdev: Valid PCI device
1270 * @extra: How many bytes to alloc past the default
1271 *
1272 * Must be done via verbs allocator, because the verbs cleanup process
1273 * both does cleanup and free of the data structure.
1274 * "extra" is for chip-specific data.
1275 */
1276static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
1277 size_t extra)
1278{
1279 struct hfi1_devdata *dd;
1280 int ret, nports;
1281
1282 /* extra is * number of ports */
1283 nports = extra / sizeof(struct hfi1_pportdata);
1284
1285 dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
1286 nports);
1287 if (!dd)
1288 return ERR_PTR(-ENOMEM);
1289 dd->num_pports = nports;
1290 dd->pport = (struct hfi1_pportdata *)(dd + 1);
1291 dd->pcidev = pdev;
1292 pci_set_drvdata(pdev, dd);
1293 dd->node = NUMA_NO_NODE;
1294
1295 ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
1296 GFP_KERNEL);
1297 if (ret < 0) {
1298 dev_err(&pdev->dev,
1299 "Could not allocate unit ID: error %d\n", -ret);
1300 goto bail;
1301 }
1302 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
1303
1304 /*
1305 * Initialize all locks for the device. This needs to be as early as
1306 * possible so locks are usable.
1307 */
1308 spin_lock_init(&dd->sc_lock);
1309 spin_lock_init(&dd->sendctrl_lock);
1310 spin_lock_init(&dd->rcvctrl_lock);
1311 spin_lock_init(&dd->uctxt_lock);
1312 spin_lock_init(&dd->hfi1_diag_trans_lock);
1313 spin_lock_init(&dd->sc_init_lock);
1314 spin_lock_init(&dd->dc8051_memlock);
1315 seqlock_init(&dd->sc2vl_lock);
1316 spin_lock_init(&dd->sde_map_lock);
1317 spin_lock_init(&dd->pio_map_lock);
1318 mutex_init(&dd->dc8051_lock);
1319 init_waitqueue_head(&dd->event_queue);
1320 spin_lock_init(&dd->irq_src_lock);
1321
1322 dd->int_counter = alloc_percpu(u64);
1323 if (!dd->int_counter) {
1324 ret = -ENOMEM;
1325 goto bail;
1326 }
1327
1328 dd->rcv_limit = alloc_percpu(u64);
1329 if (!dd->rcv_limit) {
1330 ret = -ENOMEM;
1331 goto bail;
1332 }
1333
1334 dd->send_schedule = alloc_percpu(u64);
1335 if (!dd->send_schedule) {
1336 ret = -ENOMEM;
1337 goto bail;
1338 }
1339
1340 dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
1341 if (!dd->tx_opstats) {
1342 ret = -ENOMEM;
1343 goto bail;
1344 }
1345
1346 dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
1347 if (!dd->comp_vect) {
1348 ret = -ENOMEM;
1349 goto bail;
1350 }
1351
1352 kobject_init(&dd->kobj, &hfi1_devdata_type);
1353 return dd;
1354
1355bail:
1356 hfi1_clean_devdata(dd);
1357 return ERR_PTR(ret);
1358}
1359
1360/*
1361 * Called from freeze mode handlers, and from PCI error
1362 * reporting code. Should be paranoid about state of
1363 * system and data structures.
1364 */
1365void hfi1_disable_after_error(struct hfi1_devdata *dd)
1366{
1367 if (dd->flags & HFI1_INITTED) {
1368 u32 pidx;
1369
1370 dd->flags &= ~HFI1_INITTED;
1371 if (dd->pport)
1372 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1373 struct hfi1_pportdata *ppd;
1374
1375 ppd = dd->pport + pidx;
1376 if (dd->flags & HFI1_PRESENT)
1377 set_link_state(ppd, HLS_DN_DISABLE);
1378
1379 if (ppd->statusp)
1380 *ppd->statusp &= ~HFI1_STATUS_IB_READY;
1381 }
1382 }
1383
1384 /*
1385 * Mark as having had an error for driver, and also
1386 * for /sys and status word mapped to user programs.
1387 * This marks unit as not usable, until reset.
1388 */
1389 if (dd->status)
1390 dd->status->dev |= HFI1_STATUS_HWERROR;
1391}
1392
1393static void remove_one(struct pci_dev *);
1394static int init_one(struct pci_dev *, const struct pci_device_id *);
1395static void shutdown_one(struct pci_dev *);
1396
1397#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
1398#define PFX DRIVER_NAME ": "
1399
1400const struct pci_device_id hfi1_pci_tbl[] = {
1401 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
1402 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
1403 { 0, }
1404};
1405
1406MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
1407
1408static struct pci_driver hfi1_pci_driver = {
1409 .name = DRIVER_NAME,
1410 .probe = init_one,
1411 .remove = remove_one,
1412 .shutdown = shutdown_one,
1413 .id_table = hfi1_pci_tbl,
1414 .err_handler = &hfi1_pci_err_handler,
1415};
1416
1417static void __init compute_krcvqs(void)
1418{
1419 int i;
1420
1421 for (i = 0; i < krcvqsset; i++)
1422 n_krcvqs += krcvqs[i];
1423}
1424
1425/*
1426 * Do all the generic driver unit- and chip-independent memory
1427 * allocation and initialization.
1428 */
1429static int __init hfi1_mod_init(void)
1430{
1431 int ret;
1432
1433 ret = dev_init();
1434 if (ret)
1435 goto bail;
1436
1437 ret = node_affinity_init();
1438 if (ret)
1439 goto bail;
1440
1441 /* validate max MTU before any devices start */
1442 if (!valid_opa_max_mtu(hfi1_max_mtu)) {
1443 pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
1444 hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
1445 hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
1446 }
1447 /* valid CUs run from 1-128 in powers of 2 */
1448 if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
1449 hfi1_cu = 1;
1450 /* valid credit return threshold is 0-100, variable is unsigned */
1451 if (user_credit_return_threshold > 100)
1452 user_credit_return_threshold = 100;
1453
1454 compute_krcvqs();
1455 /*
1456 * sanitize receive interrupt count, time must wait until after
1457 * the hardware type is known
1458 */
1459 if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
1460 rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
1461 /* reject invalid combinations */
1462 if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
1463 pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
1464 rcv_intr_count = 1;
1465 }
1466 if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
1467 /*
1468 * Avoid indefinite packet delivery by requiring a timeout
1469 * if count is > 1.
1470 */
1471 pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
1472 rcv_intr_timeout = 1;
1473 }
1474 if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
1475 /*
1476 * The dynamic algorithm expects a non-zero timeout
1477 * and a count > 1.
1478 */
1479 pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
1480 rcv_intr_dynamic = 0;
1481 }
1482
1483 /* sanitize link CRC options */
1484 link_crc_mask &= SUPPORTED_CRCS;
1485
1486 ret = opfn_init();
1487 if (ret < 0) {
1488 pr_err("Failed to allocate opfn_wq");
1489 goto bail_dev;
1490 }
1491
1492 /*
1493 * These must be called before the driver is registered with
1494 * the PCI subsystem.
1495 */
1496 hfi1_dbg_init();
1497 ret = pci_register_driver(&hfi1_pci_driver);
1498 if (ret < 0) {
1499 pr_err("Unable to register driver: error %d\n", -ret);
1500 goto bail_dev;
1501 }
1502 goto bail; /* all OK */
1503
1504bail_dev:
1505 hfi1_dbg_exit();
1506 dev_cleanup();
1507bail:
1508 return ret;
1509}
1510
1511module_init(hfi1_mod_init);
1512
1513/*
1514 * Do the non-unit driver cleanup, memory free, etc. at unload.
1515 */
1516static void __exit hfi1_mod_cleanup(void)
1517{
1518 pci_unregister_driver(&hfi1_pci_driver);
1519 opfn_exit();
1520 node_affinity_destroy_all();
1521 hfi1_dbg_exit();
1522
1523 WARN_ON(!xa_empty(&hfi1_dev_table));
1524 dispose_firmware(); /* asymmetric with obtain_firmware() */
1525 dev_cleanup();
1526}
1527
1528module_exit(hfi1_mod_cleanup);
1529
1530/* this can only be called after a successful initialization */
1531static void cleanup_device_data(struct hfi1_devdata *dd)
1532{
1533 int ctxt;
1534 int pidx;
1535
1536 /* users can't do anything more with chip */
1537 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1538 struct hfi1_pportdata *ppd = &dd->pport[pidx];
1539 struct cc_state *cc_state;
1540 int i;
1541
1542 if (ppd->statusp)
1543 *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
1544
1545 for (i = 0; i < OPA_MAX_SLS; i++)
1546 hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
1547
1548 spin_lock(&ppd->cc_state_lock);
1549 cc_state = get_cc_state_protected(ppd);
1550 RCU_INIT_POINTER(ppd->cc_state, NULL);
1551 spin_unlock(&ppd->cc_state_lock);
1552
1553 if (cc_state)
1554 kfree_rcu(cc_state, rcu);
1555 }
1556
1557 free_credit_return(dd);
1558
1559 if (dd->rcvhdrtail_dummy_kvaddr) {
1560 dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1561 (void *)dd->rcvhdrtail_dummy_kvaddr,
1562 dd->rcvhdrtail_dummy_dma);
1563 dd->rcvhdrtail_dummy_kvaddr = NULL;
1564 }
1565
1566 /*
1567 * Free any resources still in use (usually just kernel contexts)
1568 * at unload; we do for ctxtcnt, because that's what we allocate.
1569 */
1570 for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
1571 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1572
1573 if (rcd) {
1574 hfi1_free_ctxt_rcv_groups(rcd);
1575 hfi1_free_ctxt(rcd);
1576 }
1577 }
1578
1579 kfree(dd->rcd);
1580 dd->rcd = NULL;
1581
1582 free_pio_map(dd);
1583 /* must follow rcv context free - need to remove rcv's hooks */
1584 for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
1585 sc_free(dd->send_contexts[ctxt].sc);
1586 dd->num_send_contexts = 0;
1587 kfree(dd->send_contexts);
1588 dd->send_contexts = NULL;
1589 kfree(dd->hw_to_sw);
1590 dd->hw_to_sw = NULL;
1591 kfree(dd->boardname);
1592 vfree(dd->events);
1593 vfree(dd->status);
1594}
1595
1596/*
1597 * Clean up on unit shutdown, or error during unit load after
1598 * successful initialization.
1599 */
1600static void postinit_cleanup(struct hfi1_devdata *dd)
1601{
1602 hfi1_start_cleanup(dd);
1603 hfi1_comp_vectors_clean_up(dd);
1604 hfi1_dev_affinity_clean_up(dd);
1605
1606 hfi1_pcie_ddcleanup(dd);
1607 hfi1_pcie_cleanup(dd->pcidev);
1608
1609 cleanup_device_data(dd);
1610
1611 hfi1_free_devdata(dd);
1612}
1613
1614static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
1615{
1616 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
1617 dd_dev_err(dd, "Receive header queue count too small\n");
1618 return -EINVAL;
1619 }
1620
1621 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
1622 dd_dev_err(dd,
1623 "Receive header queue count cannot be greater than %u\n",
1624 HFI1_MAX_HDRQ_EGRBUF_CNT);
1625 return -EINVAL;
1626 }
1627
1628 if (thecnt % HDRQ_INCREMENT) {
1629 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
1630 thecnt, HDRQ_INCREMENT);
1631 return -EINVAL;
1632 }
1633
1634 return 0;
1635}
1636
1637static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1638{
1639 int ret = 0, j, pidx, initfail;
1640 struct hfi1_devdata *dd;
1641 struct hfi1_pportdata *ppd;
1642
1643 /* First, lock the non-writable module parameters */
1644 HFI1_CAP_LOCK();
1645
1646 /* Validate dev ids */
1647 if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1648 ent->device == PCI_DEVICE_ID_INTEL1)) {
1649 dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
1650 ent->device);
1651 ret = -ENODEV;
1652 goto bail;
1653 }
1654
1655 /* Allocate the dd so we can get to work */
1656 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
1657 sizeof(struct hfi1_pportdata));
1658 if (IS_ERR(dd)) {
1659 ret = PTR_ERR(dd);
1660 goto bail;
1661 }
1662
1663 /* Validate some global module parameters */
1664 ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
1665 if (ret)
1666 goto bail;
1667
1668 /* use the encoding function as a sanitization check */
1669 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1670 dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
1671 hfi1_hdrq_entsize);
1672 ret = -EINVAL;
1673 goto bail;
1674 }
1675
1676 /* The receive eager buffer size must be set before the receive
1677 * contexts are created.
1678 *
1679 * Set the eager buffer size. Validate that it falls in a range
1680 * allowed by the hardware - all powers of 2 between the min and
1681 * max. The maximum valid MTU is within the eager buffer range
1682 * so we do not need to cap the max_mtu by an eager buffer size
1683 * setting.
1684 */
1685 if (eager_buffer_size) {
1686 if (!is_power_of_2(eager_buffer_size))
1687 eager_buffer_size =
1688 roundup_pow_of_two(eager_buffer_size);
1689 eager_buffer_size =
1690 clamp_val(eager_buffer_size,
1691 MIN_EAGER_BUFFER * 8,
1692 MAX_EAGER_BUFFER_TOTAL);
1693 dd_dev_info(dd, "Eager buffer size %u\n",
1694 eager_buffer_size);
1695 } else {
1696 dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
1697 ret = -EINVAL;
1698 goto bail;
1699 }
1700
1701 /* restrict value of hfi1_rcvarr_split */
1702 hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1703
1704 ret = hfi1_pcie_init(dd);
1705 if (ret)
1706 goto bail;
1707
1708 /*
1709 * Do device-specific initialization, function table setup, dd
1710 * allocation, etc.
1711 */
1712 ret = hfi1_init_dd(dd);
1713 if (ret)
1714 goto clean_bail; /* error already printed */
1715
1716 ret = create_workqueues(dd);
1717 if (ret)
1718 goto clean_bail;
1719
1720 /* do the generic initialization */
1721 initfail = hfi1_init(dd, 0);
1722
1723 /* setup vnic */
1724 hfi1_vnic_setup(dd);
1725
1726 ret = hfi1_register_ib_device(dd);
1727
1728 /*
1729 * Now ready for use. this should be cleared whenever we
1730 * detect a reset, or initiate one. If earlier failure,
1731 * we still create devices, so diags, etc. can be used
1732 * to determine cause of problem.
1733 */
1734 if (!initfail && !ret) {
1735 dd->flags |= HFI1_INITTED;
1736 /* create debufs files after init and ib register */
1737 hfi1_dbg_ibdev_init(&dd->verbs_dev);
1738 }
1739
1740 j = hfi1_device_create(dd);
1741 if (j)
1742 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1743
1744 if (initfail || ret) {
1745 msix_clean_up_interrupts(dd);
1746 stop_timers(dd);
1747 flush_workqueue(ib_wq);
1748 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1749 hfi1_quiet_serdes(dd->pport + pidx);
1750 ppd = dd->pport + pidx;
1751 if (ppd->hfi1_wq) {
1752 destroy_workqueue(ppd->hfi1_wq);
1753 ppd->hfi1_wq = NULL;
1754 }
1755 if (ppd->link_wq) {
1756 destroy_workqueue(ppd->link_wq);
1757 ppd->link_wq = NULL;
1758 }
1759 }
1760 if (!j)
1761 hfi1_device_remove(dd);
1762 if (!ret)
1763 hfi1_unregister_ib_device(dd);
1764 hfi1_vnic_cleanup(dd);
1765 postinit_cleanup(dd);
1766 if (initfail)
1767 ret = initfail;
1768 goto bail; /* everything already cleaned */
1769 }
1770
1771 sdma_start(dd);
1772
1773 return 0;
1774
1775clean_bail:
1776 hfi1_pcie_cleanup(pdev);
1777bail:
1778 return ret;
1779}
1780
1781static void wait_for_clients(struct hfi1_devdata *dd)
1782{
	/*
	 * Remove the device init value and complete the device if there
	 * are no clients, or wait for active clients to finish.
	 */
	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	wait_for_completion(&dd->user_comp);
}

static void remove_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	/* close debugfs files before ib unregister */
	hfi1_dbg_ibdev_exit(&dd->verbs_dev);

	/* remove the /dev hfi1 interface */
	hfi1_device_remove(dd);

	/* wait for existing user space clients to finish */
	wait_for_clients(dd);

	/* unregister from IB core */
	hfi1_unregister_ib_device(dd);

	/* cleanup vnic */
	hfi1_vnic_cleanup(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	shutdown_device(dd);

	stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	postinit_cleanup(dd);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct hfi1_devdata *dd = pci_get_drvdata(pdev);

	shutdown_device(dd);
}

/**
 * hfi1_create_rcvhdrq - create a receive header queue
 * @dd: the hfi1_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	unsigned amt;
	u64 reg;

	if (!rcd->rcvhdrq) {
		gfp_t gfp_flags;

		amt = rcvhdrq_size(rcd);

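		/*
		 * Editorial note (rationale inferred, not in the original
		 * source): kernel and vnic contexts allocate their header
		 * queue with GFP_KERNEL, while user contexts use GFP_USER
		 * so the allocation is treated as one made on behalf of a
		 * userspace task.
		 */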
		if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
			gfp_flags = GFP_KERNEL;
		else
			gfp_flags = GFP_USER;
		rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
						  &rcd->rcvhdrq_dma,
						  gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			dd_dev_err(dd,
				   "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				   amt, rcd->ctxt);
			goto bail;
		}

		if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
		    HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
								    PAGE_SIZE,
								    &rcd->rcvhdrqtailaddr_dma,
								    gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
		}
	}
	/*
	 * These values are per-context:
	 *	RcvHdrCnt
	 *	RcvHdrEntSize
	 *	RcvHdrSize
	 */
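	/*
	 * Illustrative note (not part of the original source): each value
	 * below is masked to its field width and shifted into position
	 * before being written to the per-context CSR; RcvHdrCnt, for
	 * example, is programmed in units of 2^HDRQ_SIZE_SHIFT entries,
	 * hence the right shift before the mask is applied.
	 */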
	reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
			& RCV_HDR_CNT_CNT_MASK)
		<< RCV_HDR_CNT_CNT_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
	reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
			& RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
		<< RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
	reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK)
		<< RCV_HDR_SIZE_HDR_SIZE_SHIFT;
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);

	/*
	 * Program dummy tail address for every receive context
	 * before enabling any receive context
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
			dd->rcvhdrtail_dummy_dma);

	return 0;

bail_free:
	dd_dev_err(dd,
		   "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		   rcd->ctxt);
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_dma);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

/**
 * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 max_entries, egrtop, alloced_bytes = 0;
	gfp_t gfp_flags;
	u16 order, idx = 0;
	int ret = 0;
	u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	/*
	 * The minimum size of the eager buffers is a group of MTU-sized
	 * buffers.
	 * The global eager_buffer_size parameter is checked against the
	 * theoretical lower limit of the value.  Here, we check against the
	 * MTU.
	 */
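	/*
	 * Illustrative numbers (not part of the original source): with an
	 * 8 KB round_mtu and a receive-entry group size of 8, the floor
	 * enforced below is 64 KB of eager buffer space for the context.
	 */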
	if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
		rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
	/*
	 * If using one-pkt-per-egr-buffer, lower the eager buffer
	 * size to the max MTU (page-aligned).
	 */
	if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
		rcd->egrbufs.rcvtid_size = round_mtu;

	/*
	 * Eager buffer sizes of 1MB or less require smaller TID sizes
	 * to satisfy the "multiple of 8 RcvArray entries" requirement.
	 */
	if (rcd->egrbufs.size <= (1 << 20))
		rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
			rounddown_pow_of_two(rcd->egrbufs.size / 8));
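	/*
	 * Illustrative example (not part of the original source): a 1 MB
	 * eager buffer region gives rounddown_pow_of_two(1 MB / 8) = 128 KB,
	 * so rcvtid_size becomes 128 KB unless round_mtu is larger.
	 */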

	while (alloced_bytes < rcd->egrbufs.size &&
	       rcd->egrbufs.alloced < rcd->egrbufs.count) {
		rcd->egrbufs.buffers[idx].addr =
			dma_alloc_coherent(&dd->pcidev->dev,
					   rcd->egrbufs.rcvtid_size,
					   &rcd->egrbufs.buffers[idx].dma,
					   gfp_flags);
		if (rcd->egrbufs.buffers[idx].addr) {
			rcd->egrbufs.buffers[idx].len =
				rcd->egrbufs.rcvtid_size;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
				rcd->egrbufs.buffers[idx].addr;
			rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
				rcd->egrbufs.buffers[idx].dma;
			rcd->egrbufs.alloced++;
			alloced_bytes += rcd->egrbufs.rcvtid_size;
			idx++;
		} else {
			u32 new_size, i, j;
			u64 offset = 0;

			/*
			 * Fail the eager buffer allocation if:
			 * - we are already using the lowest acceptable size
			 * - we are using one-pkt-per-egr-buffer (this implies
			 *    that we are accepting only one size)
			 */
			if (rcd->egrbufs.rcvtid_size == round_mtu ||
			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
					   rcd->ctxt);
				ret = -ENOMEM;
				goto bail_rcvegrbuf_phys;
			}

			new_size = rcd->egrbufs.rcvtid_size / 2;

			/*
			 * If the first attempt to allocate memory failed, don't
			 * fail everything but continue with the next lower
			 * size.
			 */
			if (idx == 0) {
				rcd->egrbufs.rcvtid_size = new_size;
				continue;
			}

			/*
			 * Re-partition already allocated buffers to a smaller
			 * size.
			 */
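			/*
			 * Illustrative example (not part of the original
			 * source): two already-allocated 64 KB buffers
			 * re-partitioned with new_size = 32 KB yield four
			 * rcvtids entries, each pointing 32 KB further into
			 * its backing buffer.
			 */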
			rcd->egrbufs.alloced = 0;
			for (i = 0, j = 0, offset = 0; j < idx; i++) {
				if (i >= rcd->egrbufs.count)
					break;
				rcd->egrbufs.rcvtids[i].dma =
					rcd->egrbufs.buffers[j].dma + offset;
				rcd->egrbufs.rcvtids[i].addr =
					rcd->egrbufs.buffers[j].addr + offset;
				rcd->egrbufs.alloced++;
				if ((rcd->egrbufs.buffers[j].dma + offset +
				     new_size) ==
				    (rcd->egrbufs.buffers[j].dma +
				     rcd->egrbufs.buffers[j].len)) {
					j++;
					offset = 0;
				} else {
					offset += new_size;
				}
			}
			rcd->egrbufs.rcvtid_size = new_size;
		}
	}
	rcd->egrbufs.numbufs = idx;
	rcd->egrbufs.size = alloced_bytes;

	hfi1_cdbg(PROC,
		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
		  rcd->ctxt, rcd->egrbufs.alloced,
		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);

	/*
	 * Set the context's rcv array head update threshold to the closest
	 * power of 2 (so we can use a mask instead of modulo) below half
	 * the allocated entries.
	 */
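	/*
	 * Illustrative example (not part of the original source): 200
	 * allocated entries give a threshold of
	 * rounddown_pow_of_two(100) = 64.
	 */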
	rcd->egrbufs.threshold =
		rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
	/*
	 * Compute the expected RcvArray entry base. This is done after
	 * allocating the eager buffers in order to maximize the
	 * expected RcvArray entries for the context.
	 */
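	/*
	 * Illustrative example (not part of the original source): with
	 * max_entries = 2048, 320 allocated eager buffers and a group size
	 * of 8, egrtop = roundup(320, 8) = 320, so expected_count = 1728
	 * (subject to the MAX_TID_PAIR_ENTRIES * 2 cap) and expected_base
	 * starts 320 entries above eager_base.
	 */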
	max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
	egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
	rcd->expected_count = max_entries - egrtop;
	if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
		rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;

	rcd->expected_base = rcd->eager_base + egrtop;
	hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
		  rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
		  rcd->eager_base, rcd->expected_base);

	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
		hfi1_cdbg(PROC,
			  "ctxt%u: current Eager buffer size is invalid %u\n",
			  rcd->ctxt, rcd->egrbufs.rcvtid_size);
		ret = -EINVAL;
		goto bail_rcvegrbuf_phys;
	}

	for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
		hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
			     rcd->egrbufs.rcvtids[idx].dma, order);
		cond_resched();
	}

	return 0;

bail_rcvegrbuf_phys:
	for (idx = 0; idx < rcd->egrbufs.alloced &&
	     rcd->egrbufs.buffers[idx].addr;
	     idx++) {
		dma_free_coherent(&dd->pcidev->dev,
				  rcd->egrbufs.buffers[idx].len,
				  rcd->egrbufs.buffers[idx].addr,
				  rcd->egrbufs.buffers[idx].dma);
		rcd->egrbufs.buffers[idx].addr = NULL;
		rcd->egrbufs.buffers[idx].dma = 0;
		rcd->egrbufs.buffers[idx].len = 0;
	}

	return ret;
}