// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). The response
 * code is translated into an errno value; whether interrupts have
 * really been switched on is verified later by the state machine
 * (see ap_sm_setirq_wait()).
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, ind);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	if (msg == NULL)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/* State machine definitions and helpers */
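/*
 * Each AP queue runs a small state machine (aq->sm_state). The ap_sm_*
 * functions below each implement one step and return an enum ap_sm_wait
 * hint that tells the caller how long to wait before generating the
 * next AP_SM_EVENT_POLL for this queue.
 */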

static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status after the dequeue attempt.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;

	status = ap_dqap(aq->qid, &aq->reply->psmid,
			 aq->reply->msg, aq->reply->len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
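		/*
		 * A reply was dequeued: decrement the count of requests
		 * known to be on the hardware queue (never below zero),
		 * but keep it at least 1 while the hardware still reports
		 * the queue as not empty, so polling continues.
		 */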
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			ap_msg->receive(aq, ap_msg, aq->reply);
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or
 * AP_SM_WAIT_INTERRUPT.
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or
 * AP_SM_WAIT_INTERRUPT.
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;
	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_TIMEOUT or
 * AP_SM_WAIT_INTERRUPT.
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
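	/*
	 * min() works here because enum ap_sm_wait is assumed to be
	 * ordered from the most urgent wait (AP_SM_WAIT_AGAIN) to the
	 * least urgent (AP_SM_WAIT_NONE), so the stricter of the two
	 * hints is returned.
	 */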
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: pointer to the AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN or AP_SM_WAIT_TIMEOUT.
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
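/*
 * The table maps (state machine state, event) to a handler function.
 * AP_SM_EVENT_POLL drives normal progress (reset, irq setup,
 * send/receive), while AP_SM_EVENT_TIMEOUT either does nothing or,
 * for a working/full queue, falls back to a fresh reset.
 */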
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

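/**
 * ap_sm_event(): Execute one state machine step for an AP queue.
 * @aq: pointer to the AP queue
 * @event: the event to process
 *
 * Runs the handler registered in ap_jumptable for the current state,
 * unless the queue device is still uninitialized.
 */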
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

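/**
 * ap_sm_event_loop(): Process an event and run follow-up steps.
 * @aq: pointer to the AP queue
 * @event: the event to process
 *
 * Calls ap_sm_event() repeatedly as long as it asks for an immediate
 * retry (AP_SM_WAIT_AGAIN) and returns the final wait hint.
 */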
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
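/*
 * The show()/store() handlers below back the sysfs files of an AP
 * queue device (typically found under /sys/bus/ap/devices/). All of
 * them take aq->lock so the reported counters and states stay
 * consistent with the state machine.
 */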
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
	else
		return scnprintf(buf, PAGE_SIZE, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [FULL]\n");
			break;
		default:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

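/**
 * ap_queue_init_reply(): Attach the reply buffer to an AP queue.
 * @aq: The AP queue
 * @reply: the ap_message used as reply buffer by ap_sm_recv()
 *
 * Once the reply buffer is set, the state machine is polled once so
 * that any pending work can be processed.
 */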
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
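 *
 * Returns 0 if the message was queued, or -ENODEV if the queue
 * device is not in the OPERATING state.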
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow new messages to be queued if the device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else
		rc = -ENODEV;

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* set the device state to SHUTDOWN while removal is in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * All messages have been flushed and the device state is
	 * SHUTDOWN. Now reset the queue with zeroize (ap_zapq()), which
	 * also clears the irq registration, and move the device state
	 * back to its initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);