// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"

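/*
 * Helpers that recognize control requests which must be intercepted and
 * replayed through the local USB core rather than passed through untouched
 * (see tweak_special_requests() below).
 */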
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
	       (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
	       (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
	       (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 value;
	__u16 index;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	value = le16_to_cpu(req->wValue);
	index = le16_to_cpu(req->wIndex);

	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
	    (req->bRequestType == USB_RT_PORT) &&
	    (value == USB_PORT_FEAT_RESET)) {
		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
		return 1;
	} else
		return 0;
}

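/*
 * A forwarded CLEAR_FEATURE(ENDPOINT_HALT) is replayed via usb_clear_halt()
 * so that the local USB core also resets its own state (e.g. data toggle)
 * for the stalled endpoint.
 */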
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	int target_endp;
	int target_dir;
	int target_pipe;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb itself is the target of this clear_halt request (i.e.,
	 * the control endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* Is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
	target_dir = le16_to_cpu(req->wIndex) & 0x0080;

	if (target_dir)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}

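/*
 * A forwarded SET_CONFIGURATION is replayed via usb_set_configuration() so
 * that the local USB core selects the configuration itself. Errors other
 * than -ENODEV are only logged; the function always reports success to its
 * caller.
 */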
static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req;
	__u16 config;
	int err;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	err = usb_set_configuration(sdev->udev, config);
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return 0;
}

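/*
 * A forwarded SET_FEATURE(PORT_RESET) is turned into a full
 * usb_reset_device() of the exported device, taking the device lock via
 * usb_lock_device_for_reset() first. Failure to obtain the lock is only
 * logged.
 */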
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}

/*
 * clear_halt, set_interface, set_configuration, and port reset requests
 * require special handling: they are replayed through the corresponding
 * usb_*() calls above so that the exported device's state stays consistent
 * with the local USB core's view of it.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		tweak_clear_halt_cmd(urb);

	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);

	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);

	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also comments about unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., it is still
		 * in flight in the usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * When the unlinking flag is set, priv->seqnum is changed
		 * from the seqnum of the cancelled urb to the seqnum of
		 * the unlink request. This will be used to build the
		 * result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is called outside the spinlock to avoid
		 * spinlock recursion, since stub_complete() is sometimes
		 * called in this context rather than in interrupt context.
		 * If stub_complete() runs before we call usb_unlink_urb(),
		 * usb_unlink_urb() will return an error value. In that
		 * case, stub_tx still returns the result pdu of this
		 * unlink request even though the submission completed and
		 * no actual unlinking was performed. Because urb->status
		 * is then not -ECONNRESET, the driver on the client host
		 * should be able to detect that the unlink request did
		 * not take effect.
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink a urb # %lu, ret %d\n",
				priv->seqnum, ret);

		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in the priv_init queue.
	 * It has already completed and its result is (or was) being sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * report the completion of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}

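/*
 * A request is accepted only if its devid matches the exported device and
 * the device is currently in the SDEV_ST_USED state (exported and attached
 * to a client).
 */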
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid = 0;

	if (pdu->base.devid == sdev->devid) {
		spin_lock_irq(&ud->lock);
		if (ud->status == SDEV_ST_USED) {
			/* A request is valid. */
			valid = 1;
		}
		spin_unlock_irq(&ud->lock);
	}

	return valid;
}

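/*
 * Allocate the per-URB bookkeeping structure for a CMD_SUBMIT and link it
 * into sdev->priv_init under priv_lock. On allocation failure an
 * SDEV_EVENT_ERROR_MALLOC event is raised and NULL is returned.
 */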
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * Once the stub_priv is linked into the priv_init list,
	 * our error handler can find and free the allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}

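/*
 * Map the endpoint number and direction from the CMD_SUBMIT header to a
 * pipe value for the exported device. For isochronous endpoints the
 * number_of_packets field is validated against the transfer length and the
 * endpoint's maximum packet size. Returns -1 if the endpoint is invalid.
 */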
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate packet size and number of packets */
		unsigned int maxp, packets, bytes;

		maxp = usb_endpoint_maxp(epd);
		maxp *= usb_endpoint_maxp_mult(epd);
		bytes = pdu->u.cmd_submit.transfer_buffer_length;
		packets = DIV_ROUND_UP(bytes, maxp);

		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets > packets) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	/* reached via goto when the endpoint number or endpoint is invalid */
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}

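/*
 * Strip transfer_flags that make no sense for the target endpoint,
 * mirroring the transfer-flag sanity checking that the USB core applies in
 * usb_submit_urb(). Flags received from the (possibly untrusted) client are
 * masked down to a safe, standard set before submission.
 */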
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			 !setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	default:		/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}

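/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and an URB, copy the setup
 * packet, receive the transfer buffer and any isochronous descriptors from
 * the socket, apply the special-request tweaks and flag masking, and submit
 * the URB to the exported device. stub_complete() then sends the result
 * back via stub_tx.
 */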
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	int pipe = get_pipe(sdev, pdu);

	if (pipe == -1)
		return;

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* allocate urb transfer buffer, if needed */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* copy urb setup packet */
	priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
					  GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		dev_err(&udev->dev, "allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set other members from the base header of pdu */
	priv->urb->context = (void *) priv;
	priv->urb->dev = udev;
	priv->urb->pipe = pipe;
	priv->urb->complete = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/*
	 * If this is one of the intercepted requests, tweak_special_requests()
	 * handles it here; submitting the urb below as well is then redundant
	 * but assumed to be harmless.
	 */
	tweak_special_requests(priv->urb);

	masking_bogus_flags(priv->urb);
	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);

	if (ret == 0)
		usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
				  pdu->base.seqnum);
	else {
		dev_err(&udev->dev, "submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	usbip_dbg_stub_rx("Leave\n");
}

/* receive and dispatch one pdu from the client */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->udev->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);

	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;

	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;

	default:
		/* unknown command from the peer; treat it as a tcp error */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}

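/*
 * Receive loop run in the "stub_rx" kernel thread of an exported device:
 * it processes pdus until a usbip event is flagged or the thread is asked
 * to stop. As a rough sketch (the exact call site lives in stub_dev.c and
 * may differ), the thread is started when a client attaches, e.g.:
 *
 *	sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, "stub_rx");
 */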
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}