// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * Adjunct processor bus, queue related code.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>

#include "ap_bus.h"
#include "ap_debug.h"

static void __ap_flush_queue(struct ap_queue *aq);

/**
 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
 * @aq: The AP queue
 * @ind: the notification indicator byte
 *
 * Enables interruption on the AP queue via ap_aqic(). Based on the return
 * value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
	struct ap_queue_status status;
	struct ap_qirq_ctrl qirqctrl = { 0 };

	qirqctrl.ir = 1;
	qirqctrl.isc = AP_ISC;
	status = ap_aqic(aq->qid, qirqctrl, ind);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return 0;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_INVALID_ADDRESS:
		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
		       AP_QID_CARD(aq->qid),
		       AP_QID_QUEUE(aq->qid));
		return -EOPNOTSUPP;
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_BUSY:
	default:
		return -EBUSY;
	}
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  int special)
{
	if (special)
		qid |= 0x400000UL;
	return ap_nqap(qid, psmid, msg, length);
}

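/**
 * ap_send(): Send a message to an AP queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Thin wrapper around __ap_send() that maps the AP queue status
 * response code to an errno value.
 */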
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

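/**
 * ap_recv(): Receive a message from an AP queue.
 * @qid: The AP queue number
 * @psmid: Pointer for the program supplied message identifier
 * @msg: Buffer for the reply message
 * @length: The buffer length
 *
 * Returns 0 on success, -ENOENT if no reply is pending and the queue
 * is empty, -EBUSY if the reply is not ready yet or a reset is in
 * progress, -EINVAL if @msg is NULL, and -ENODEV if the device is gone.
 */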
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	if (msg == NULL)
		return -EINVAL;
	status = ap_dqap(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/* State machine definitions and helpers */

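/*
 * ap_sm_nop(): State machine no-op handler, always returns AP_SM_WAIT_NONE.
 */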
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
	return AP_SM_WAIT_NONE;
}

/**
 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
 *	not change the state of the device.
 * @aq: pointer to the AP queue
 *
 * Returns the AP queue status as returned by the DQAP instruction.
 */
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	bool found = false;

	status = ap_dqap(aq->qid, &aq->reply->psmid,
			 aq->reply->msg, aq->reply->len);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
		if (!status.queue_empty && !aq->queue_count)
			aq->queue_count++;
		if (aq->queue_count > 0)
			mod_timer(&aq->timeout,
				  jiffies + aq->request_timeout);
		list_for_each_entry(ap_msg, &aq->pendingq, list) {
			if (ap_msg->psmid != aq->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			aq->pendingq_count--;
			ap_msg->receive(aq, ap_msg, aq->reply);
			found = true;
			break;
		}
		if (!found) {
			AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
				    __func__, aq->reply->psmid,
				    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		}
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (!status.queue_empty || aq->queue_count <= 0)
			break;
		/* The card shouldn't forget requests but who knows. */
		aq->queue_count = 0;
		list_splice_init(&aq->pendingq, &aq->requestq);
		aq->requestq_count += aq->pendingq_count;
		aq->pendingq_count = 0;
		break;
	default:
		break;
	}
	return status;
}

/**
 * ap_sm_read(): Receive pending reply messages from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (!aq->reply)
		return AP_SM_WAIT_NONE;
	status = ap_sm_recv(aq);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (aq->queue_count > 0)
			return aq->interrupt ?
				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
		aq->sm_state = AP_SM_STATE_IDLE;
		return AP_SM_WAIT_NONE;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_write(): Send messages from the request queue to an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;
	ap_qid_t qid = aq->qid;

	if (aq->requestq_count <= 0)
		return AP_SM_WAIT_NONE;
	/* Start the next request on the queue. */
	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
#ifdef CONFIG_ZCRYPT_DEBUG
	if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
		AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
			    __func__, ap_msg->fi.cmd);
		qid = 0xFF00;
	}
#endif
	status = __ap_send(qid, ap_msg->psmid,
			   ap_msg->msg, ap_msg->len,
			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		aq->queue_count = max_t(int, 1, aq->queue_count + 1);
		if (aq->queue_count == 1)
			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
		list_move_tail(&ap_msg->list, &aq->pendingq);
		aq->requestq_count--;
		aq->pendingq_count++;
		if (aq->queue_count < aq->card->queue_depth) {
			aq->sm_state = AP_SM_STATE_WORKING;
			return AP_SM_WAIT_AGAIN;
		}
		fallthrough;
	case AP_RESPONSE_Q_FULL:
		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
		return aq->interrupt ?
			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_INVALID_DOMAIN:
		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
		fallthrough;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EINVAL;
		ap_msg->receive(aq, ap_msg, NULL);
		return AP_SM_WAIT_AGAIN;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
 */
static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
	return min(ap_sm_read(aq), ap_sm_write(aq));
}

/**
 * ap_sm_reset(): Reset an AP queue.
 * @aq: pointer to the AP queue
 *
 * Submit the Reset command to an AP queue.
 */
static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
	struct ap_queue_status status;

	status = ap_rapq(aq->qid);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		aq->sm_state = AP_SM_STATE_RESET_WAIT;
		aq->interrupt = false;
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_reset_wait(): Test queue for completion of the reset operation
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;
	void *lsi_ptr;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		lsi_ptr = ap_airq_ptr();
		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
		else
			aq->sm_state = (aq->queue_count > 0) ?
				AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
		return AP_SM_WAIT_AGAIN;
	case AP_RESPONSE_BUSY:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return AP_SM_WAIT_TIMEOUT;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/**
 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
 * @aq: pointer to the AP queue
 *
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_TIMEOUT
 */
static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
	struct ap_queue_status status;

	if (aq->queue_count > 0 && aq->reply)
		/* Try to read a completed message and get the status */
		status = ap_sm_recv(aq);
	else
		/* Get the status with TAPQ */
		status = ap_tapq(aq->qid, NULL);

	if (status.irq_enabled == 1) {
		/* Irqs are now enabled */
		aq->interrupt = true;
		aq->sm_state = (aq->queue_count > 0) ?
			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
	}

	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		if (aq->queue_count > 0)
			return AP_SM_WAIT_AGAIN;
		fallthrough;
	case AP_RESPONSE_NO_PENDING_REPLY:
		return AP_SM_WAIT_TIMEOUT;
	default:
		aq->dev_state = AP_DEV_STATE_ERROR;
		aq->last_err_rc = status.response_code;
		AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
			    __func__, status.response_code,
			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
		return AP_SM_WAIT_NONE;
	}
}

/*
 * AP state machine jump table
 */
static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
	[AP_SM_STATE_RESET_START] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_RESET_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_reset_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_SETIRQ_WAIT] = {
		[AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_IDLE] = {
		[AP_SM_EVENT_POLL] = ap_sm_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
	},
	[AP_SM_STATE_WORKING] = {
		[AP_SM_EVENT_POLL] = ap_sm_read_write,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
	[AP_SM_STATE_QUEUE_FULL] = {
		[AP_SM_EVENT_POLL] = ap_sm_read,
		[AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
	},
};

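/**
 * ap_sm_event(): Process one state machine event on an AP queue.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Dispatches to the handler registered in ap_jumptable for the current
 * state machine state; for an uninitiated device state it simply
 * returns AP_SM_WAIT_NONE.
 */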
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		return ap_jumptable[aq->sm_state][event](aq);
	else
		return AP_SM_WAIT_NONE;
}

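/**
 * ap_sm_event_loop(): Process state machine events until the queue no
 *	longer asks for an immediate re-poll.
 * @aq: pointer to the AP queue
 * @event: the state machine event to process
 *
 * Calls ap_sm_event() repeatedly while it returns AP_SM_WAIT_AGAIN and
 * returns the first other wait hint.
 */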
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
	enum ap_sm_wait wait;

	while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
		;
	return wait;
}

/*
 * AP queue related attributes.
 */
static ssize_t request_count_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	bool valid = false;
	u64 req_cnt;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
		req_cnt = aq->total_request_count;
		valid = true;
	}
	spin_unlock_bh(&aq->lock);

	if (valid)
		return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
	else
		return scnprintf(buf, PAGE_SIZE, "-\n");
}

static ssize_t request_count_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	aq->total_request_count = 0;
	spin_unlock_bh(&aq->lock);

	return count;
}

static DEVICE_ATTR_RW(request_count);

static ssize_t requestq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int reqq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		reqq_cnt = aq->requestq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}

static DEVICE_ATTR_RO(requestq_count);

static ssize_t pendingq_count_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	unsigned int penq_cnt = 0;

	spin_lock_bh(&aq->lock);
	if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
		penq_cnt = aq->pendingq_count;
	spin_unlock_bh(&aq->lock);
	return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}

static DEVICE_ATTR_RO(pendingq_count);

static ssize_t reset_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	switch (aq->sm_state) {
	case AP_SM_STATE_RESET_START:
	case AP_SM_STATE_RESET_WAIT:
		rc = scnprintf(buf, PAGE_SIZE, "Reset in progress.\n");
		break;
	case AP_SM_STATE_WORKING:
	case AP_SM_STATE_QUEUE_FULL:
		rc = scnprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
	}
	spin_unlock_bh(&aq->lock);
	return rc;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);

	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

	return count;
}

static DEVICE_ATTR_RW(reset);

static ssize_t interrupt_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
		rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
	else if (aq->interrupt)
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
	else
		rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(interrupt);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->config ? 1 : 0);
	spin_unlock_bh(&aq->lock);
	return rc;
}

static DEVICE_ATTR_RO(config);

#ifdef CONFIG_ZCRYPT_DEBUG
static ssize_t states_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc = 0;

	spin_lock_bh(&aq->lock);
	/* queue device state */
	switch (aq->dev_state) {
	case AP_DEV_STATE_UNINITIATED:
		rc = scnprintf(buf, PAGE_SIZE, "UNINITIATED\n");
		break;
	case AP_DEV_STATE_OPERATING:
		rc = scnprintf(buf, PAGE_SIZE, "OPERATING");
		break;
	case AP_DEV_STATE_SHUTDOWN:
		rc = scnprintf(buf, PAGE_SIZE, "SHUTDOWN");
		break;
	case AP_DEV_STATE_ERROR:
		rc = scnprintf(buf, PAGE_SIZE, "ERROR");
		break;
	default:
		rc = scnprintf(buf, PAGE_SIZE, "UNKNOWN");
	}
	/* state machine state */
	if (aq->dev_state) {
		switch (aq->sm_state) {
		case AP_SM_STATE_RESET_START:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_START]\n");
			break;
		case AP_SM_STATE_RESET_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [RESET_WAIT]\n");
			break;
		case AP_SM_STATE_SETIRQ_WAIT:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [SETIRQ_WAIT]\n");
			break;
		case AP_SM_STATE_IDLE:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [IDLE]\n");
			break;
		case AP_SM_STATE_WORKING:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [WORKING]\n");
			break;
		case AP_SM_STATE_QUEUE_FULL:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [FULL]\n");
			break;
		default:
			rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					" [UNKNOWN]\n");
		}
	}
	spin_unlock_bh(&aq->lock);

	return rc;
}
static DEVICE_ATTR_RO(states);

static ssize_t last_err_rc_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ap_queue *aq = to_ap_queue(dev);
	int rc;

	spin_lock_bh(&aq->lock);
	rc = aq->last_err_rc;
	spin_unlock_bh(&aq->lock);

	switch (rc) {
	case AP_RESPONSE_NORMAL:
		return scnprintf(buf, PAGE_SIZE, "NORMAL\n");
	case AP_RESPONSE_Q_NOT_AVAIL:
		return scnprintf(buf, PAGE_SIZE, "Q_NOT_AVAIL\n");
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return scnprintf(buf, PAGE_SIZE, "RESET_IN_PROGRESS\n");
	case AP_RESPONSE_DECONFIGURED:
		return scnprintf(buf, PAGE_SIZE, "DECONFIGURED\n");
	case AP_RESPONSE_CHECKSTOPPED:
		return scnprintf(buf, PAGE_SIZE, "CHECKSTOPPED\n");
	case AP_RESPONSE_BUSY:
		return scnprintf(buf, PAGE_SIZE, "BUSY\n");
	case AP_RESPONSE_INVALID_ADDRESS:
		return scnprintf(buf, PAGE_SIZE, "INVALID_ADDRESS\n");
	case AP_RESPONSE_OTHERWISE_CHANGED:
		return scnprintf(buf, PAGE_SIZE, "OTHERWISE_CHANGED\n");
	case AP_RESPONSE_Q_FULL:
		return scnprintf(buf, PAGE_SIZE, "Q_FULL/NO_PENDING_REPLY\n");
	case AP_RESPONSE_INDEX_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "INDEX_TOO_BIG\n");
	case AP_RESPONSE_NO_FIRST_PART:
		return scnprintf(buf, PAGE_SIZE, "NO_FIRST_PART\n");
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return scnprintf(buf, PAGE_SIZE, "MESSAGE_TOO_BIG\n");
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return scnprintf(buf, PAGE_SIZE, "REQ_FAC_NOT_INST\n");
	default:
		return scnprintf(buf, PAGE_SIZE, "response code %d\n", rc);
	}
}
static DEVICE_ATTR_RO(last_err_rc);
#endif

static struct attribute *ap_queue_dev_attrs[] = {
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_reset.attr,
	&dev_attr_interrupt.attr,
	&dev_attr_config.attr,
#ifdef CONFIG_ZCRYPT_DEBUG
	&dev_attr_states.attr,
	&dev_attr_last_err_rc.attr,
#endif
	NULL
};

static struct attribute_group ap_queue_dev_attr_group = {
	.attrs = ap_queue_dev_attrs
};

static const struct attribute_group *ap_queue_dev_attr_groups[] = {
	&ap_queue_dev_attr_group,
	NULL
};

static struct device_type ap_queue_type = {
	.name = "ap_queue",
	.groups = ap_queue_dev_attr_groups,
};

static void ap_queue_device_release(struct device *dev)
{
	struct ap_queue *aq = to_ap_queue(dev);

	spin_lock_bh(&ap_queues_lock);
	hash_del(&aq->hnode);
	spin_unlock_bh(&ap_queues_lock);

	kfree(aq);
}

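/**
 * ap_queue_create(): Allocate and initialize a new AP queue device.
 * @qid: The AP queue number
 * @device_type: The device type of the owning card
 *
 * Returns the new struct ap_queue, or NULL if the allocation failed.
 * The queue starts with interrupts disabled and empty request and
 * pending lists.
 */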
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
{
	struct ap_queue *aq;

	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
	if (!aq)
		return NULL;
	aq->ap_dev.device.release = ap_queue_device_release;
	aq->ap_dev.device.type = &ap_queue_type;
	aq->ap_dev.device_type = device_type;
	aq->qid = qid;
	aq->interrupt = false;
	spin_lock_init(&aq->lock);
	INIT_LIST_HEAD(&aq->pendingq);
	INIT_LIST_HEAD(&aq->requestq);
	timer_setup(&aq->timeout, ap_request_timeout, 0);

	return aq;
}

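/**
 * ap_queue_init_reply(): Set up the reply buffer of an AP queue and
 *	kick the state machine with a poll event.
 * @aq: The AP queue
 * @reply: The ap_message to be used as reply buffer
 */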
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
{
	aq->reply = reply;

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);

/**
 * ap_queue_message(): Queue a request to an AP device.
 * @aq: The AP device to queue the message to
 * @ap_msg: The message that is to be added
 *
 * Returns 0 on success or -ENODEV if the queue is not in a state
 * that accepts new messages.
 */
int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	int rc = 0;

	/* msg needs to have a valid receive-callback */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&aq->lock);

	/* only allow to queue new messages if device state is ok */
	if (aq->dev_state == AP_DEV_STATE_OPERATING) {
		list_add_tail(&ap_msg->list, &aq->requestq);
		aq->requestq_count++;
		aq->total_request_count++;
		atomic64_inc(&aq->card->total_request_count);
	} else
		rc = -ENODEV;

	/* Send/receive as many requests from the queue as possible. */
	ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));

	spin_unlock_bh(&aq->lock);

	return rc;
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @aq: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&aq->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &aq->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				aq->pendingq_count--;
				goto found;
			}
		aq->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * __ap_flush_queue(): Flush requests.
 * @aq: Pointer to the AP queue
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_queue *aq)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
		list_del_init(&ap_msg->list);
		aq->pendingq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
		list_del_init(&ap_msg->list);
		aq->requestq_count--;
		ap_msg->rc = -EAGAIN;
		ap_msg->receive(aq, ap_msg, NULL);
	}
	aq->queue_count = 0;
}

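/**
 * ap_flush_queue(): Flush all requests of an AP queue.
 * @aq: Pointer to the AP queue
 *
 * Locked wrapper around __ap_flush_queue().
 */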
void ap_flush_queue(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	__ap_flush_queue(aq);
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

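/**
 * ap_queue_prepare_remove(): Prepare an AP queue device for removal.
 * @aq: Pointer to the AP queue
 *
 * Flushes all pending and queued requests, moves the device state to
 * SHUTDOWN and stops the request timeout timer.
 */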
void ap_queue_prepare_remove(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	/* flush queue */
	__ap_flush_queue(aq);
	/* move queue device state to SHUTDOWN in progress */
	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);
}

void ap_queue_remove(struct ap_queue *aq)
{
	/*
	 * all messages have been flushed and the device state
	 * is SHUTDOWN. Now reset with zero which also clears
	 * the irq registration and move the device state
	 * to the initial value AP_DEV_STATE_UNINITIATED.
	 */
	spin_lock_bh(&aq->lock);
	ap_zapq(aq->qid);
	aq->dev_state = AP_DEV_STATE_UNINITIATED;
	spin_unlock_bh(&aq->lock);
}

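/**
 * ap_queue_init_state(): Put an AP queue device into operation.
 * @aq: Pointer to the AP queue
 *
 * Moves the device state to OPERATING, starts the state machine with
 * a reset and triggers an initial poll event.
 */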
void ap_queue_init_state(struct ap_queue *aq)
{
	spin_lock_bh(&aq->lock);
	aq->dev_state = AP_DEV_STATE_OPERATING;
	aq->sm_state = AP_SM_STATE_RESET_START;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
	spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_state);