// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
        struct rxrpc_call *call = from_timer(call, t, timer);

        _enter("%d", call->debug_id);

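        /* Only kick the work processor while the call is still live; a
         * completed call has no further timed events to handle.
         */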
        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
                rxrpc_queue_call(call);
        }
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
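        /* Take a ref while call_lock is still held so that the call can't be
         * freed between the lookup and the caller receiving the pointer.
         */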
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
                                    unsigned int debug_id)
{
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);

        /* Prevent lockdep reporting a deadlock false positive between the afs
         * filesystem and sys_sendmsg() via the mmap sem.
         */
        if (rx->sk.sk_kern_sock)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);

        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->input_lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
        call->next_rx_timo = 20 * HZ;
        call->next_req_timo = 1 * HZ;

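        /* Poison the as-yet-unlinked rb-tree node; 0xed makes any premature
         * use of it stand out in a crash dump.
         */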
        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        /* Leave space in the ring to handle a maxed-out jumbo packet */
        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;

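        /* Start congestion management with a conservative two-packet window;
         * the slow-start threshold begins at the ring capacity, so the window
         * grows by slow-start until loss feedback pulls the threshold down.
         */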
        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

        call->rxnet = rxnet;
        atomic_inc(&rxnet->nr_calls);
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp,
                                                  unsigned int debug_id)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        call->tx_phase = true;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;

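        /* Park every timed event in the far future; each deadline is pulled
         * forward as acks, pings and timeouts get scheduled.  Setting
         * timer.expires to "now" primes the timer without arming it.
         */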
        call->ack_at = j;
        call->ack_lost_at = j;
        call->resend_at = j;
        call->ping_at = j;
        call->expect_rx_by = j;
        call->expect_req_by = j;
        call->expect_term_by = j;
        call->timer.expires = now;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         struct rxrpc_call_params *p,
                                         gfp_t gfp,
                                         unsigned int debug_id)
        __releases(&rx->sk.sk_lock.slock)
        __acquires(&call->user_mutex)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, p->user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        if (p->intr)
                __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
                         atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        rxnet = call->rxnet;
        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
                         atomic_read(&call->usage), here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        ret = -EEXIST;

error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        trace_rxrpc_call(call->debug_id, rxrpc_call_error,
                         atomic_read(&call->usage), here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id = sp->hdr.callNumber;
        call->service_id = sp->hdr.serviceId;
        call->cid = sp->hdr.cid;
        call->state = RXRPC_CALL_SERVER_ACCEPTING;
        if (sp->hdr.securityIndex > 0)
                call->state = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
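        /* Conditionally get a ref: if the usage count has already hit zero,
         * the call is being destroyed and must not be resurrected or queued.
         */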
        int n = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
                                 here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
                                 here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}

/*
 * Clean up the RxTx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
        int i;

        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
                call->rxtx_buffer[i] = NULL;
        }
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call->debug_id, rxrpc_call_release,
                         atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

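        /* Stop the call timer and wait for any already-running expiry handler
         * to finish before tearing the call down any further.
         */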
        del_timer_sync(&call->timer);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn)
                rxrpc_disconnect_call(call);
        if (call->security)
                call->security->free_call_crypto(call);

        rxrpc_cleanup_ring(call);
        _leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

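        /* First dispose of calls that were queued for acceptance but never
         * accepted, then abort and release every call still attached to the
         * socket.
         */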
        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
        unsigned int debug_id = call->debug_id;
        int n;

        ASSERT(call != NULL);

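        /* Trace with the debug ID cached above: unless this was the last ref,
         * another context may free the call before the tracepoint fires, so
         * the call pointer must not be dereferenced after the decrement.
         */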
        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(debug_id, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

                if (!list_empty(&call->link)) {
                        write_lock(&rxnet->call_lock);
                        list_del_init(&call->link);
                        write_unlock(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
        struct rxrpc_net *rxnet = call->rxnet;

        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->conn, ==, NULL);

        rxrpc_cleanup_ring(call);
        rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);

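        /* The call may still be reachable through RCU-protected pointers
         * (e.g. conn->channels[].call), so defer the final free until after
         * an RCU grace period.
         */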
        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (!list_empty(&rxnet->calls)) {
                write_lock(&rxnet->call_lock);

                while (!list_empty(&rxnet->calls)) {
                        call = list_entry(rxnet->calls.next,
                                          struct rxrpc_call, link);
                        _debug("Zapping call %p", call);

                        rxrpc_see_call(call);
                        list_del_init(&call->link);

                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, atomic_read(&call->usage),
                               rxrpc_call_states[call->state],
                               call->flags, call->events);

                        write_unlock(&rxnet->call_lock);
                        cond_resched();
                        write_lock(&rxnet->call_lock);
                }

                write_unlock(&rxnet->call_lock);
        }

        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}