Update Linux to v5.4.2

Pull in the upstream rework of the s390 qdio driver
(drivers/s390/cio/qdio_main.c): the buffer-scan helpers now take the
scan position as an explicit 'start' argument and return the number of
processed buffers instead of the new frontier, so the callers advance
first_to_check themselves and the last_move tracking goes away.
get_buf_states() bails out early when the first buffer is still owned
by the channel. AOB handling moves out of qdio_kick_handler() into the
outbound scan path, multicast outbound queues no longer use the
completion queue, and the fast-requeue path now checks that the
previous buffer's state was actually retrieved. Drivers can set
scan_threshold to 0 to opt out of the completion-scan tasklet and
instead poll for completions through the newly exported
qdio_inspect_queue().

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
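---
Note on usage (illustrative, not part of the upstream change): when a
driver sets scan_threshold to 0, handle_outbound() below returns before
scheduling the completion tasklet, so the driver is expected to scan for
completions itself through the newly exported qdio_inspect_queue(). A
minimal sketch of such a poll, with hypothetical driver function and
variable names:

	/* Illustrative only: poll one input queue for completed buffers.
	 * qdio_inspect_queue() returns the number of buffers that moved,
	 * 0 if nothing changed, or a negative errno (-ENODEV).
	 */
	static void example_poll_input(struct ccw_device *cdev, unsigned int nr)
	{
		unsigned int bufnr = 0, error = 0;
		int count;

		count = qdio_inspect_queue(cdev, nr, true, &bufnr, &error);
		if (count <= 0)
			return;	/* nothing new, or device gone */

		/* Process SBALs bufnr .. (bufnr + count - 1), modulo
		 * QDIO_MAX_BUFFERS_PER_Q, then return them to the queue
		 * with do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, nr, bufnr, count).
		 */
	}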
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9c7d9da..5b63c50 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -205,17 +205,22 @@
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
 
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
 
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
 
/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
@@ -312,9 +319,7 @@
int retries = 0, cc;
unsigned long laob = 0;
 
- WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
- !q->u.out.use_cq));
- if (q->u.out.use_cq && aob != 0) {
+ if (aob) {
fc = QDIO_SIGA_WRITEQ;
laob = aob;
}
@@ -371,7 +376,7 @@
static inline void qdio_sync_queues(struct qdio_q *q)
{
/* PCI capable outbound queues will also be scanned so sync them too */
- if (pci_out_supported(q))
+ if (pci_out_supported(q->irq_ptr))
qdio_siga_sync_all(q);
else
qdio_siga_sync_q(q);
@@ -382,7 +387,7 @@
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
 
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -415,7 +420,8 @@
q->q_stats.nr_sbals[pos]++;
}
 
-static void process_buffer_error(struct qdio_q *q, int count)
+static void process_buffer_error(struct qdio_q *q, unsigned int start,
+ int count)
{
unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
SLSB_P_OUTPUT_NOT_INIT;
@@ -424,29 +430,29 @@
 
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
- q->sbal[q->first_to_check]->element[15].sflags == 0x10) {
+ q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
goto set;
}
 
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
- DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
+ DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
- q->sbal[q->first_to_check]->element[14].sflags,
- q->sbal[q->first_to_check]->element[15].sflags);
+ q->sbal[start]->element[14].sflags,
+ q->sbal[start]->element[15].sflags);
 
set:
/*
* Interrupts may be avoided as long as the error is present
* so change the buffer state immediately to avoid starvation.
*/
- set_buf_states(q, q->first_to_check, state, count);
+ set_buf_states(q, start, state, count);
}
 
-static inline void inbound_primed(struct qdio_q *q, int count)
+static inline void inbound_primed(struct qdio_q *q, unsigned int start,
+ int count)
{
int new;
 
@@ -457,7 +463,7 @@
if (!q->u.in.polling) {
q->u.in.polling = 1;
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
 
@@ -465,7 +471,7 @@
set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.ack_count);
q->u.in.ack_count = count;
- q->u.in.ack_start = q->first_to_check;
+ q->u.in.ack_start = start;
return;
}
 
@@ -473,7 +479,7 @@
* ACK the newest buffer. The ACK will be removed in qdio_stop_polling
* or by the next inbound run.
*/
- new = add_buf(q->first_to_check, count - 1);
+ new = add_buf(start, count - 1);
if (q->u.in.polling) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
@@ -488,10 +494,10 @@
if (!count)
return;
/* need to change ALL buffers to get more interrupts */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
+ set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
 
-static int get_inbound_buffer_frontier(struct qdio_q *q)
+static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -504,64 +510,58 @@
*/
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
if (!count)
- goto out;
+ return 0;
 
/*
* No siga sync here, as a PCI or we after a thin interrupt
* already sync'ed the queues.
*/
- count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+ count = get_buf_states(q, start, &state, count, 1, 0);
if (!count)
- goto out;
+ return 0;
 
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ inbound_primed(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
- break;
+ return count;
case SLSB_P_INPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
- q->nr, q->first_to_check);
- break;
+ q->nr, start);
+ return 0;
default:
WARN_ON_ONCE(1);
- }
-out:
- return q->first_to_check;
-}
-
-static int qdio_inbound_q_moved(struct qdio_q *q)
-{
- int bufnr;
-
- bufnr = get_inbound_buffer_frontier(q);
-
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
- if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
- q->u.in.timestamp = get_tod_clock();
- return 1;
- } else
return 0;
+ }
}
 
-static inline int qdio_inbound_q_done(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
+{
+ int count;
+
+ count = get_inbound_buffer_frontier(q, start);
+
+ if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
+ q->u.in.timestamp = get_tod_clock();
+
+ return count;
+}
+
+static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
 
@@ -570,7 +570,7 @@
 
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
+ get_buf_state(q, start, &state, 0);
 
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
@@ -588,26 +588,17 @@
* has (probably) not moved (see qdio_inbound_processing).
*/
if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
- q->first_to_check);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
return 1;
} else
return 0;
}
 
-static inline int contains_aobs(struct qdio_q *q)
-{
- return !q->is_input_q && q->u.out.use_cq;
-}
-
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
unsigned char state = 0;
int j, b = start;
 
- if (!contains_aobs(q))
- return;
-
for (j = 0; j < count; ++j) {
get_buf_state(q, b, &state, 0);
if (state == SLSB_P_OUTPUT_PENDING) {
@@ -618,8 +609,6 @@
q->u.out.sbal_state[b].flags |=
QDIO_OUTBUF_STATE_FLAG_PENDING;
q->u.out.aobs[b] = NULL;
- } else if (state == SLSB_P_OUTPUT_EMPTY) {
- q->u.out.sbal_state[b].aob = NULL;
}
b = next_buf(b);
}
@@ -630,15 +619,11 @@
{
unsigned long phys_aob = 0;
 
- if (!q->use_cq)
- return 0;
-
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
@@ -648,17 +633,13 @@
return phys_aob;
}
 
-static void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
{
int start = q->first_to_kick;
- int end = q->first_to_check;
- int count;
 
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
 
- count = sub_buf(end, start);
-
if (q->is_input_q) {
qperf_inc(q, inbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
@@ -668,13 +649,11 @@
start, count);
}
 
- qdio_handle_aobs(q, start, count);
-
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
q->irq_ptr->int_parm);
 
/* for the next time */
- q->first_to_kick = end;
+ q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
 
@@ -689,14 +668,20 @@
 
static void __qdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
 
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
 
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
 
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
@@ -708,7 +693,7 @@
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -720,7 +705,7 @@
__qdio_inbound_processing(q);
}
 
-static int get_outbound_buffer_frontier(struct qdio_q *q)
+static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
int count;
@@ -729,59 +714,50 @@
 
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
- !pci_out_supported(q)) ||
+ !pci_out_supported(q->irq_ptr)) ||
(queue_type(q) == QDIO_IQDIO_QFMT &&
multicast_outbound(q)))
qdio_siga_sync_q(q);
 
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
- goto out;
+ return 0;
 
- count = get_buf_states(q, q->first_to_check, &state, count, 0,
- q->u.out.use_cq);
+ count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
if (!count)
- goto out;
+ return 0;
 
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
- q->first_to_check = add_buf(q->first_to_check, count);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
-
- break;
+ return count;
case SLSB_P_OUTPUT_ERROR:
- process_buffer_error(q, count);
- q->first_to_check = add_buf(q->first_to_check, count);
+ process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
- break;
+ return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
- break;
+ return 0;
case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
- break;
+ return 0;
default:
WARN_ON_ONCE(1);
+ return 0;
}
-
-out:
- return q->first_to_check;
}
 
/* all buffers processed? */
@@ -790,18 +766,19 @@
return atomic_read(&q->nr_buf_used) == 0;
}
 
-static inline int qdio_outbound_q_moved(struct qdio_q *q)
+static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
- int bufnr;
+ int count;
 
- bufnr = get_outbound_buffer_frontier(q);
+ count = get_outbound_buffer_frontier(q, start);
 
- if (bufnr != q->last_move) {
- q->last_move = bufnr;
+ if (count) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
- return 1;
- } else
- return 0;
+ if (q->u.out.use_cq)
+ qdio_handle_aobs(q, start, count);
+ }
+
+ return count;
}
 
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
@@ -848,15 +825,21 @@
 
static void __qdio_outbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_outbound);
WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
 
- if (qdio_outbound_q_moved(q))
- qdio_kick_handler(q);
+ count = qdio_outbound_q_moved(q, start);
+ if (count) {
+ q->first_to_check = add_buf(start, count);
+ qdio_kick_handler(q, count);
+ }
 
- if (queue_type(q) == QDIO_ZFCP_QFMT)
- if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
- goto sched;
+ if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
+ !qdio_outbound_q_done(q))
+ goto sched;
 
if (q->u.out.pci_out_enabled)
return;
@@ -892,37 +875,40 @@
qdio_tasklet_schedule(q);
}
 
-static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
struct qdio_q *out;
int i;
 
- if (!pci_out_supported(q))
+ if (!pci_out_supported(irq) || !irq->scan_threshold)
return;
 
- for_each_output_queue(q->irq_ptr, out, i)
+ for_each_output_queue(irq, out, i)
if (!qdio_outbound_q_done(out))
qdio_tasklet_schedule(out);
}
 
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
+ unsigned int start = q->first_to_check;
+ int count;
+
qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
 
- /*
- * The interrupt could be caused by a PCI request. Check the
- * PCI capable outbound queues.
- */
- qdio_check_outbound_after_thinint(q);
+ /* The interrupt could be caused by a PCI request: */
+ qdio_check_outbound_pci_queues(q->irq_ptr);
 
- if (!qdio_inbound_q_moved(q))
+ count = qdio_inbound_q_moved(q, start);
+ if (count == 0)
return;
 
- qdio_kick_handler(q);
+ start = add_buf(start, count);
+ q->first_to_check = start;
+ qdio_kick_handler(q, count);
 
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched);
if (!qdio_tasklet_schedule(q))
return;
@@ -933,7 +919,7 @@
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
- if (!qdio_inbound_q_done(q)) {
+ if (!qdio_inbound_q_done(q, start)) {
qperf_inc(q, tasklet_inbound_resched2);
qdio_tasklet_schedule(q);
}
@@ -987,7 +973,7 @@
}
}
 
- if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
return;
 
for_each_output_queue(irq_ptr, q, i) {
@@ -1318,6 +1304,8 @@
 
for_each_output_queue(irq_ptr, q, i) {
if (use_cq) {
+ if (multicast_outbound(q))
+ continue;
if (qdio_enable_async_operation(&q->u.out) < 0) {
use_cq = 0;
continue;
@@ -1540,6 +1528,7 @@
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
+ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
int used, rc = 0;
 
@@ -1563,22 +1552,27 @@
/* One SIGA-W per buffer required for unicast HSI */
WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
 
- phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+ if (q->u.out.use_cq)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
 
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
} else {
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q, 0);
- else
- qperf_inc(q, fast_requeue);
+ rc = qdio_kick_outbound_q(q, 0);
}
 
+ /* Let drivers implement their own completion scanning: */
+ if (!scan_threshold)
+ return rc;
+
/* in case of SIGA errors we must process the error immediately */
- if (used >= q->u.out.scan_threshold || rc)
+ if (used >= scan_threshold || rc)
qdio_tasklet_schedule(q);
else
/* free the SBALs in case of no further traffic */
@@ -1653,7 +1647,7 @@
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
- if (!qdio_inbound_q_done(q))
+ if (!qdio_inbound_q_done(q, q->first_to_check))
goto rescan;
return 0;
 
@@ -1667,6 +1661,44 @@
}
EXPORT_SYMBOL(qdio_start_irq);
 
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+ unsigned int *error)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+ qdio_outbound_q_moved(q, start);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_check = add_buf(start, count);
+ q->qdio_error = 0;
+
+ return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+
+ return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
/**
* qdio_get_next_buffers - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
@@ -1683,7 +1715,6 @@
int *error)
{
struct qdio_q *q;
- int start, end;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
if (!irq_ptr)
@@ -1697,25 +1728,13 @@
if (need_siga_sync(q))
qdio_sync_queues(q);
 
- /* check the PCI capable outbound queues. */
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return 0;
+ qdio_check_outbound_pci_queues(irq_ptr);
 
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
 
- start = q->first_to_kick;
- end = q->first_to_check;
- *bufnr = start;
- *error = q->qdio_error;
-
- /* for the next time */
- q->first_to_kick = end;
- q->qdio_error = 0;
- return sub_buf(end, start);
+ return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);