/*
 * net/tipc/server.c: TIPC server infrastructure
 *
 * Copyright (c) 2012-2013, Wind River Systems
 * Copyright (c) 2017-2018, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "subscr.h"
#include "topsrv.h"
#include "core.h"
#include "socket.h"
#include "addr.h"
#include "msg.h"
#include "bearer.h"
#include <net/sock.h>
#include <linux/module.h>

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT	25
#define MAX_RECV_MSG_COUNT	25
#define CF_CONNECTED		1
#define CF_SERVER		2

#define TIPC_SERVER_NAME_LEN	32

/**
 * struct tipc_topsrv - TIPC server structure
 * @conn_idr: identifier set of connections
 * @idr_lock: protect the connection identifier set
 * @idr_in_use: number of allocated identifier entries
 * @net: network namespace instance
 * @awork: accept work item
 * @rcv_wq: receive workqueue
 * @send_wq: send workqueue
 * @listener: topsrv listener socket
 * @name: server name
 */
struct tipc_topsrv {
	struct idr conn_idr;
	spinlock_t idr_lock; /* for idr list */
	int idr_in_use;
	struct net *net;
	struct work_struct awork;
	struct workqueue_struct *rcv_wq;
	struct workqueue_struct *send_wq;
	struct socket *listener;
	char name[TIPC_SERVER_NAME_LEN];
};

/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state
 * @server: pointer to connected server
 * @sub_list: list of all pertaining subscriptions
 * @sub_lock: lock protecting the subscription list
 * @rwork: receive work item
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_topsrv *server;
	struct list_head sub_list;
	spinlock_t sub_lock; /* for subscription list */
	struct work_struct rwork;
	struct list_head outqueue;
	spinlock_t outqueue_lock; /* for outqueue */
	struct work_struct swork;
};

/* An entry waiting to be sent */
struct outqueue_entry {
	bool inactive;
	struct tipc_event evt;
	struct list_head list;
};

static void tipc_conn_recv_work(struct work_struct *work);
static void tipc_conn_send_work(struct work_struct *work);
static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt);
static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s);

static bool connected(struct tipc_conn *con)
{
	return con && test_bit(CF_CONNECTED, &con->flags);
}

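/* tipc_conn_kref_release - final cleanup when the last reference is dropped:
 * remove the connection from the server's idr, release its socket (if any)
 * and free all events still waiting in the outqueue.
 */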
static void tipc_conn_kref_release(struct kref *kref)
{
	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
	struct tipc_topsrv *s = con->server;
	struct outqueue_entry *e, *safe;

	spin_lock_bh(&s->idr_lock);
	idr_remove(&s->conn_idr, con->conid);
	s->idr_in_use--;
	spin_unlock_bh(&s->idr_lock);
	if (con->sock)
		sock_release(con->sock);

	spin_lock_bh(&con->outqueue_lock);
	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
		list_del(&e->list);
		kfree(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
	kfree(con);
}

static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}

static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}

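/* tipc_conn_close - shut down a connection
 * May be called concurrently from the send and receive paths;
 * test_and_clear_bit(CF_CONNECTED) ensures the teardown (subscription
 * cleanup, socket shutdown and dropping the initial reference) runs only
 * once.
 */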
static void tipc_conn_close(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;
	bool disconnect = false;

	write_lock_bh(&sk->sk_callback_lock);
	disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);

	if (disconnect) {
		sk->sk_user_data = NULL;
		tipc_conn_delete_sub(con, NULL);
	}
	write_unlock_bh(&sk->sk_callback_lock);

	/* Handle concurrent calls from sending and receiving threads */
	if (!disconnect)
		return;

	/* Don't flush pending work items, just let them expire */
	kernel_sock_shutdown(con->sock, SHUT_RDWR);

	conn_put(con);
}

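/* tipc_conn_alloc - create a connection object and assign it a connection
 * identifier from the server's idr. The connection starts in CF_CONNECTED
 * state with one reference held (from kref_init); that reference is dropped
 * again via conn_put() once the connection is closed.
 */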
static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
{
	struct tipc_conn *con;
	int ret;

	con = kzalloc(sizeof(*con), GFP_ATOMIC);
	if (!con)
		return ERR_PTR(-ENOMEM);

	kref_init(&con->kref);
	INIT_LIST_HEAD(&con->outqueue);
	INIT_LIST_HEAD(&con->sub_list);
	spin_lock_init(&con->outqueue_lock);
	spin_lock_init(&con->sub_lock);
	INIT_WORK(&con->swork, tipc_conn_send_work);
	INIT_WORK(&con->rwork, tipc_conn_recv_work);

	spin_lock_bh(&s->idr_lock);
	ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
	if (ret < 0) {
		kfree(con);
		spin_unlock_bh(&s->idr_lock);
		return ERR_PTR(-ENOMEM);
	}
	con->conid = ret;
	s->idr_in_use++;
	spin_unlock_bh(&s->idr_lock);

	set_bit(CF_CONNECTED, &con->flags);
	con->server = s;

	return con;
}

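/* tipc_conn_lookup - map a connection identifier to its connection object
 * Returns NULL if the id is unknown or the connection is going away;
 * otherwise a reference is taken and the caller must drop it with conn_put().
 * kref_get_unless_zero() guards against racing with the final release.
 */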
static struct tipc_conn *tipc_conn_lookup(struct tipc_topsrv *s, int conid)
{
	struct tipc_conn *con;

	spin_lock_bh(&s->idr_lock);
	con = idr_find(&s->conn_idr, conid);
	if (!connected(con) || !kref_get_unless_zero(&con->kref))
		con = NULL;
	spin_unlock_bh(&s->idr_lock);
	return con;
}

/* tipc_conn_delete_sub - delete a specific or all subscriptions
 * for a given subscriber
 */
static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
{
	struct tipc_net *tn = tipc_net(con->server->net);
	struct list_head *sub_list = &con->sub_list;
	struct tipc_subscription *sub, *tmp;

	spin_lock_bh(&con->sub_lock);
	list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
		if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
			tipc_sub_unsubscribe(sub);
			atomic_dec(&tn->subscription_count);
		} else if (s) {
			break;
		}
	}
	spin_unlock_bh(&con->sub_lock);
}

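/* tipc_conn_send_to_sock - drain the connection's outqueue
 * Each queued event is sent over the connection socket, or injected into the
 * local stack via tipc_topsrv_kern_evt() for socket-less (kernel)
 * connections. On EWOULDBLOCK the entry is left on the queue and sending
 * resumes when tipc_conn_write_space() requeues the send work item.
 */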
static void tipc_conn_send_to_sock(struct tipc_conn *con)
{
	struct list_head *queue = &con->outqueue;
	struct tipc_topsrv *srv = con->server;
	struct outqueue_entry *e;
	struct tipc_event *evt;
	struct msghdr msg;
	struct kvec iov;
	int count = 0;
	int ret;

	spin_lock_bh(&con->outqueue_lock);

	while (!list_empty(queue)) {
		e = list_first_entry(queue, struct outqueue_entry, list);
		evt = &e->evt;
		spin_unlock_bh(&con->outqueue_lock);

		if (e->inactive)
			tipc_conn_delete_sub(con, &evt->s);

		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;
		iov.iov_base = evt;
		iov.iov_len = sizeof(*evt);
		msg.msg_name = NULL;

		if (con->sock) {
			ret = kernel_sendmsg(con->sock, &msg, &iov,
					     1, sizeof(*evt));
			if (ret == -EWOULDBLOCK || ret == 0) {
				cond_resched();
				return;
			} else if (ret < 0) {
				return tipc_conn_close(con);
			}
		} else {
			tipc_topsrv_kern_evt(srv->net, evt);
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}
		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		kfree(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
}

static void tipc_conn_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (connected(con))
		tipc_conn_send_to_sock(con);

	conn_put(con);
}

/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
 * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
 */
void tipc_topsrv_queue_evt(struct net *net, int conid,
			   u32 event, struct tipc_event *evt)
{
	struct tipc_topsrv *srv = tipc_topsrv(net);
	struct outqueue_entry *e;
	struct tipc_conn *con;

	con = tipc_conn_lookup(srv, conid);
	if (!con)
		return;

	if (!connected(con))
		goto err;

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		goto err;
	e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
	memcpy(&e->evt, evt, sizeof(*evt));
	spin_lock_bh(&con->outqueue_lock);
	list_add_tail(&e->list, &con->outqueue);
	spin_unlock_bh(&con->outqueue_lock);

	if (queue_work(srv->send_wq, &con->swork))
		return;
err:
	conn_put(con);
}

/* tipc_conn_write_space - interrupt callback after a sendmsg EAGAIN
 * Indicates that there now is more space in the send buffer
 * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
 */
static void tipc_conn_write_space(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sk->sk_user_data;
	if (connected(con)) {
		conn_get(con);
		if (!queue_work(con->server->send_wq, &con->swork))
			conn_put(con);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

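/* tipc_conn_rcv_sub - process one subscription request from a subscriber
 * A request carrying TIPC_SUB_CANCEL deletes the matching subscription(s);
 * anything else creates a new subscription, bounded by TIPC_MAX_SUBSCR per
 * network namespace. Returns 0 on success, -1 on failure.
 */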
static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
			     struct tipc_conn *con,
			     struct tipc_subscr *s)
{
	struct tipc_net *tn = tipc_net(srv->net);
	struct tipc_subscription *sub;

	if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
		s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
		tipc_conn_delete_sub(con, s);
		return 0;
	}
	if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
		pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
		return -1;
	}
	sub = tipc_sub_subscribe(srv->net, s, con->conid);
	if (!sub)
		return -1;
	atomic_inc(&tn->subscription_count);
	spin_lock_bh(&con->sub_lock);
	list_add(&sub->sub_list, &con->sub_list);
	spin_unlock_bh(&con->sub_lock);
	return 0;
}

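/* tipc_conn_rcv_from_sock - read one struct tipc_subscr from the socket and
 * hand it to tipc_conn_rcv_sub(). Any error other than EWOULDBLOCK, or a
 * rejected subscription, closes the connection.
 */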
static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
{
	struct tipc_topsrv *srv = con->server;
	struct sock *sk = con->sock->sk;
	struct msghdr msg = {};
	struct tipc_subscr s;
	struct kvec iov;
	int ret;

	iov.iov_base = &s;
	iov.iov_len = sizeof(s);
	msg.msg_name = NULL;
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
	ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
	if (ret == -EWOULDBLOCK)
		return -EWOULDBLOCK;
	if (ret == sizeof(s)) {
		read_lock_bh(&sk->sk_callback_lock);
		/* RACE: the connection can be closed in the meantime */
		if (likely(connected(con)))
			ret = tipc_conn_rcv_sub(srv, con, &s);
		read_unlock_bh(&sk->sk_callback_lock);
		if (!ret)
			return 0;
	}

	tipc_conn_close(con);
	return ret;
}

static void tipc_conn_recv_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
	int count = 0;

	while (connected(con)) {
		if (tipc_conn_rcv_from_sock(con))
			break;

		/* Don't flood Rx machine */
		if (++count >= MAX_RECV_MSG_COUNT) {
			cond_resched();
			count = 0;
		}
	}
	conn_put(con);
}

/* tipc_conn_data_ready - interrupt callback indicating the socket has data
 * The queued work is launched into tipc_conn_recv_work()->tipc_conn_rcv_from_sock()
 */
static void tipc_conn_data_ready(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sk->sk_user_data;
	if (connected(con)) {
		conn_get(con);
		if (!queue_work(con->server->rcv_wq, &con->rwork))
			conn_put(con);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

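/* tipc_topsrv_accept - accept work item for the listener socket
 * Accepts all pending connection requests, allocates a tipc_conn for each
 * and registers the data-ready/write-space callbacks on the new socket.
 */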
static void tipc_topsrv_accept(struct work_struct *work)
{
	struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
	struct socket *lsock = srv->listener;
	struct socket *newsock;
	struct tipc_conn *con;
	struct sock *newsk;
	int ret;

	while (1) {
		ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
		if (ret < 0)
			return;
		con = tipc_conn_alloc(srv);
		if (IS_ERR(con)) {
			ret = PTR_ERR(con);
			sock_release(newsock);
			return;
		}
		/* Register callbacks */
		newsk = newsock->sk;
		write_lock_bh(&newsk->sk_callback_lock);
		newsk->sk_data_ready = tipc_conn_data_ready;
		newsk->sk_write_space = tipc_conn_write_space;
		newsk->sk_user_data = con;
		con->sock = newsock;
		write_unlock_bh(&newsk->sk_callback_lock);

		/* Wake up receive process in case of 'SYN+' message */
		newsk->sk_data_ready(newsk);
	}
}

/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
 * The queued job is launched into tipc_topsrv_accept()
 */
static void tipc_topsrv_listener_data_ready(struct sock *sk)
{
	struct tipc_topsrv *srv;

	read_lock_bh(&sk->sk_callback_lock);
	srv = sk->sk_user_data;
	if (srv->listener)
		queue_work(srv->rcv_wq, &srv->awork);
	read_unlock_bh(&sk->sk_callback_lock);
}

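/* tipc_topsrv_create_listener - create the topology server's listening
 * socket, bind it to the well-known {TIPC_TOP_SRV, TIPC_TOP_SRV} name
 * sequence with node scope and put it into listening state.
 */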
static int tipc_topsrv_create_listener(struct tipc_topsrv *srv)
{
	int imp = TIPC_CRITICAL_IMPORTANCE;
	struct socket *lsock = NULL;
	struct sockaddr_tipc saddr;
	struct sock *sk;
	int rc;

	rc = sock_create_kern(srv->net, AF_TIPC, SOCK_SEQPACKET, 0, &lsock);
	if (rc < 0)
		return rc;

	srv->listener = lsock;
	sk = lsock->sk;
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = tipc_topsrv_listener_data_ready;
	sk->sk_user_data = srv;
	write_unlock_bh(&sk->sk_callback_lock);

	rc = kernel_setsockopt(lsock, SOL_TIPC, TIPC_IMPORTANCE,
			       (char *)&imp, sizeof(imp));
	if (rc < 0)
		goto err;

	saddr.family = AF_TIPC;
	saddr.addrtype = TIPC_ADDR_NAMESEQ;
	saddr.addr.nameseq.type = TIPC_TOP_SRV;
	saddr.addr.nameseq.lower = TIPC_TOP_SRV;
	saddr.addr.nameseq.upper = TIPC_TOP_SRV;
	saddr.scope = TIPC_NODE_SCOPE;

	rc = kernel_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
	if (rc < 0)
		goto err;
	rc = kernel_listen(lsock, 0);
	if (rc < 0)
		goto err;

	/* As the server's listening socket owner and creator is the same
	 * module, we have to decrease the TIPC module reference count to
	 * guarantee that it remains zero after the server socket is created;
	 * otherwise "rmmod" cannot remove the TIPC module once it has been
	 * loaded.
	 *
	 * However, sock_create_kern() increments the module reference count
	 * twice: once for the owner of the TIPC socket's proto_ops struct and
	 * once for the owner of the TIPC proto struct. Therefore, we must
	 * decrement the module reference count twice to ensure that it stays
	 * zero after the server's listening socket is created. Of course, we
	 * must bump the module reference count twice as well before the
	 * socket is closed.
	 */
	module_put(lsock->ops->owner);
	module_put(sk->sk_prot_creator->owner);

	return 0;
err:
	sock_release(lsock);
	return -EINVAL;
}

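/* tipc_topsrv_kern_subscr - set up a subscription on behalf of a kernel
 * user, using a socket-less connection whose events are delivered through
 * tipc_topsrv_kern_evt(). A hypothetical in-kernel caller might look like
 * this (sketch only, 'port' being the caller's own socket port):
 *
 *	int conid;
 *
 *	if (!tipc_topsrv_kern_subscr(net, port, type, lower, upper,
 *				     TIPC_SUB_PORTS, &conid))
 *		return -ENOMEM;
 *	...
 *	tipc_topsrv_kern_unsubscr(net, conid);
 */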
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
			     u32 upper, u32 filter, int *conid)
{
	struct tipc_subscr sub;
	struct tipc_conn *con;
	int rc;

	sub.seq.type = type;
	sub.seq.lower = lower;
	sub.seq.upper = upper;
	sub.timeout = TIPC_WAIT_FOREVER;
	sub.filter = filter;
	*(u32 *)&sub.usr_handle = port;

	con = tipc_conn_alloc(tipc_topsrv(net));
	if (IS_ERR(con))
		return false;

	*conid = con->conid;
	con->sock = NULL;
	rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
	if (rc >= 0)
		return true;
	conn_put(con);
	return false;
}

void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(tipc_topsrv(net), conid);
	if (!con)
		return;

	test_and_clear_bit(CF_CONNECTED, &con->flags);
	tipc_conn_delete_sub(con, NULL);
	conn_put(con);
	conn_put(con);
}

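/* tipc_topsrv_kern_evt - deliver an event to a kernel subscriber
 * The event is wrapped in a TOP_SRV message addressed to the subscriber's
 * own port and looped back into the local receive path via tipc_sk_rcv().
 */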
static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt)
{
	u32 port = *(u32 *)&evt->s.usr_handle;
	u32 self = tipc_own_addr(net);
	struct sk_buff_head evtq;
	struct sk_buff *skb;

	skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
			      self, self, port, port, 0);
	if (!skb)
		return;
	msg_set_dest_droppable(buf_msg(skb), true);
	memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
	skb_queue_head_init(&evtq);
	__skb_queue_tail(&evtq, skb);
	tipc_loopback_trace(net, &evtq);
	tipc_sk_rcv(net, &evtq);
}

static int tipc_topsrv_work_start(struct tipc_topsrv *s)
{
	s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
	if (!s->rcv_wq) {
		pr_err("can't start tipc receive workqueue\n");
		return -ENOMEM;
	}

	s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
	if (!s->send_wq) {
		pr_err("can't start tipc send workqueue\n");
		destroy_workqueue(s->rcv_wq);
		return -ENOMEM;
	}

	return 0;
}

static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}

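/* tipc_topsrv_start - create the per-namespace topology server instance:
 * allocate the server struct, start the send/receive workqueues and create
 * the listening socket. Called from the pernet init hook below.
 */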
static int tipc_topsrv_start(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	const char name[] = "topology_server";
	struct tipc_topsrv *srv;
	int ret;

	srv = kzalloc(sizeof(*srv), GFP_ATOMIC);
	if (!srv)
		return -ENOMEM;

	srv->net = net;
	INIT_WORK(&srv->awork, tipc_topsrv_accept);

	strscpy(srv->name, name, sizeof(srv->name));
	tn->topsrv = srv;
	atomic_set(&tn->subscription_count, 0);

	spin_lock_init(&srv->idr_lock);
	idr_init(&srv->conn_idr);
	srv->idr_in_use = 0;

	ret = tipc_topsrv_work_start(srv);
	if (ret < 0)
		goto err_start;

	ret = tipc_topsrv_create_listener(srv);
	if (ret < 0)
		goto err_create;

	return 0;

err_create:
	tipc_topsrv_work_stop(srv);
err_start:
	kfree(srv);
	return ret;
}

static void tipc_topsrv_stop(struct net *net)
{
	struct tipc_topsrv *srv = tipc_topsrv(net);
	struct socket *lsock = srv->listener;
	struct tipc_conn *con;
	int id;

	spin_lock_bh(&srv->idr_lock);
	for (id = 0; srv->idr_in_use; id++) {
		con = idr_find(&srv->conn_idr, id);
		if (con) {
			spin_unlock_bh(&srv->idr_lock);
			tipc_conn_close(con);
			spin_lock_bh(&srv->idr_lock);
		}
	}
	__module_get(lsock->ops->owner);
	__module_get(lsock->sk->sk_prot_creator->owner);
	srv->listener = NULL;
	spin_unlock_bh(&srv->idr_lock);
	sock_release(lsock);
	tipc_topsrv_work_stop(srv);
	idr_destroy(&srv->conn_idr);
	kfree(srv);
}

int __net_init tipc_topsrv_init_net(struct net *net)
{
	return tipc_topsrv_start(net);
}

void __net_exit tipc_topsrv_exit_net(struct net *net)
{
	tipc_topsrv_stop(net);
}