// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
| 7 | |
| 8 | #include <linux/slab.h> |
| 9 | #include <linux/key.h> |
| 10 | #include <linux/ctype.h> |
| 11 | #include <linux/dns_resolver.h> |
| 12 | #include <linux/sched.h> |
| 13 | #include <linux/inet.h> |
| 14 | #include <linux/namei.h> |
| 15 | #include <keys/rxrpc-type.h> |
| 16 | #include "internal.h" |
| 17 | |
/* Seconds an unused, auto-added cell lingers before it may be garbage
 * collected; the min/max TTLs clamp how long DNS-sourced VL server records
 * are treated as valid (see afs_update_cell()).
 */
static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;

static void afs_manage_cell(struct work_struct *);

/*
 * Drop one count from net->cells_outstanding and, when it hits zero, wake
 * anyone waiting for all cell work to drain (see afs_cell_purge()).
 */
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}
| 29 | |
/*
 * Set the cell timer to fire after a given delay, assuming it's not already
 * set for an earlier time.
 *
 * The increment taken on cells_outstanding is handed to the pending timer;
 * if timer_reduce() reports the timer was already active (it already owns a
 * count), our increment is dropped again.
 */
static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
{
	if (net->live) {
		atomic_inc(&net->cells_outstanding);
		if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
			afs_dec_cells_outstanding(net);
	}
}
| 42 | |
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 *
 * A NULL @name looks up the workstation cell (net->ws_cell) instead and
 * yields -EDESTADDRREQ if none is set.  On success the cell is returned
 * with its usage count raised; otherwise an ERR_PTR() is returned.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.  The first pass is lockless; on retry,
		 * read_seqbegin_or_lock() takes the lock for real.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				ret = 0;
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		/* Binary search: case-insensitive name, then length. */
		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				/* Only take a match whose count hasn't yet
				 * reached zero (i.e. it isn't being killed).
				 */
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	if (ret != 0 && cell)
		afs_put_cell(net, cell);

	return ret == 0 ? cell : ERR_PTR(ret);
}
| 117 | |
/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key.
 *
 * @addresses is a colon-separated textual list of VL server addresses, or
 * NULL to attach an empty list so the DNS can be consulted later by the
 * manager.  Returns a cell with a usage count of 2 (one for the tree, one
 * for the caller), or an ERR_PTR().
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot.  This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* The name is stored in a separate allocation (+1 for the NUL). */
	cell->name = kmalloc(namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	/* Cell names are kept in canonical lowercase form. */
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i] = 0;

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		/* Configured addresses never expire. */
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		/* Already expired: forces an immediate DNS lookup. */
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell->name);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
| 218 | |
| 219 | /* |
| 220 | * afs_lookup_cell - Look up or create a cell record. |
| 221 | * @net: The network namespace |
| 222 | * @name: The name of the cell. |
| 223 | * @namesz: The strlen of the cell name. |
| 224 | * @vllist: A colon/comma separated list of numeric IP addresses or NULL. |
| 225 | * @excl: T if an error should be given if the cell name already exists. |
| 226 | * |
| 227 | * Look up a cell record by name and query the DNS for VL server addresses if |
| 228 | * needed. Note that that actual DNS query is punted off to the manager thread |
| 229 | * so that this function can return immediately if interrupted whilst allowing |
| 230 | * cell records to be shared even if not yet fully constructed. |
| 231 | */ |
| 232 | struct afs_cell *afs_lookup_cell(struct afs_net *net, |
| 233 | const char *name, unsigned int namesz, |
| 234 | const char *vllist, bool excl) |
| 235 | { |
| 236 | struct afs_cell *cell, *candidate, *cursor; |
| 237 | struct rb_node *parent, **pp; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 238 | enum afs_cell_state state; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 239 | int ret, n; |
| 240 | |
| 241 | _enter("%s,%s", name, vllist); |
| 242 | |
| 243 | if (!excl) { |
| 244 | rcu_read_lock(); |
| 245 | cell = afs_lookup_cell_rcu(net, name, namesz); |
| 246 | rcu_read_unlock(); |
| 247 | if (!IS_ERR(cell)) |
| 248 | goto wait_for_cell; |
| 249 | } |
| 250 | |
| 251 | /* Assume we're probably going to create a cell and preallocate and |
| 252 | * mostly set up a candidate record. We can then use this to stash the |
| 253 | * name, the net namespace and VL server addresses. |
| 254 | * |
| 255 | * We also want to do this before we hold any locks as it may involve |
| 256 | * upcalling to userspace to make DNS queries. |
| 257 | */ |
| 258 | candidate = afs_alloc_cell(net, name, namesz, vllist); |
| 259 | if (IS_ERR(candidate)) { |
| 260 | _leave(" = %ld", PTR_ERR(candidate)); |
| 261 | return candidate; |
| 262 | } |
| 263 | |
| 264 | /* Find the insertion point and check to see if someone else added a |
| 265 | * cell whilst we were allocating. |
| 266 | */ |
| 267 | write_seqlock(&net->cells_lock); |
| 268 | |
| 269 | pp = &net->cells.rb_node; |
| 270 | parent = NULL; |
| 271 | while (*pp) { |
| 272 | parent = *pp; |
| 273 | cursor = rb_entry(parent, struct afs_cell, net_node); |
| 274 | |
| 275 | n = strncasecmp(cursor->name, name, |
| 276 | min_t(size_t, cursor->name_len, namesz)); |
| 277 | if (n == 0) |
| 278 | n = cursor->name_len - namesz; |
| 279 | if (n < 0) |
| 280 | pp = &(*pp)->rb_left; |
| 281 | else if (n > 0) |
| 282 | pp = &(*pp)->rb_right; |
| 283 | else |
| 284 | goto cell_already_exists; |
| 285 | } |
| 286 | |
| 287 | cell = candidate; |
| 288 | candidate = NULL; |
| 289 | rb_link_node_rcu(&cell->net_node, parent, pp); |
| 290 | rb_insert_color(&cell->net_node, &net->cells); |
| 291 | atomic_inc(&net->cells_outstanding); |
| 292 | write_sequnlock(&net->cells_lock); |
| 293 | |
| 294 | queue_work(afs_wq, &cell->manager); |
| 295 | |
| 296 | wait_for_cell: |
| 297 | _debug("wait_for_cell"); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 298 | wait_var_event(&cell->state, |
| 299 | ({ |
| 300 | state = smp_load_acquire(&cell->state); /* vs error */ |
| 301 | state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED; |
| 302 | })); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 303 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 304 | /* Check the state obtained from the wait check. */ |
| 305 | if (state == AFS_CELL_FAILED) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 306 | ret = cell->error; |
| 307 | goto error; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 308 | } |
| 309 | |
| 310 | _leave(" = %p [cell]", cell); |
| 311 | return cell; |
| 312 | |
| 313 | cell_already_exists: |
| 314 | _debug("cell exists"); |
| 315 | cell = cursor; |
| 316 | if (excl) { |
| 317 | ret = -EEXIST; |
| 318 | } else { |
| 319 | afs_get_cell(cursor); |
| 320 | ret = 0; |
| 321 | } |
| 322 | write_sequnlock(&net->cells_lock); |
| 323 | kfree(candidate); |
| 324 | if (ret == 0) |
| 325 | goto wait_for_cell; |
| 326 | goto error_noput; |
| 327 | error: |
| 328 | afs_put_cell(net, cell); |
| 329 | error_noput: |
| 330 | _leave(" = %d [error]", ret); |
| 331 | return ERR_PTR(ret); |
| 332 | } |
| 333 | |
/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * @rootcell has the form "<cellname>[:<vl-addrs>]"; NULL means no root cell
 * is configured.  The cell is pinned against GC and installed as
 * net->ws_cell.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	/* Split "<cellname>:<addrs>" at the first colon, if any. */
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* allocate a cell record for the root cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the cell against GC; the extra ref is only taken on the first
	 * transition of the flag.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_get_cell(new_root);

	/* install the new cell */
	write_seqlock(&net->cells_lock);
	old_root = rcu_access_pointer(net->ws_cell);
	rcu_assign_pointer(net->ws_cell, new_root);
	write_sequnlock(&net->cells_lock);

	/* Drop the ref held on any previously installed workstation cell. */
	afs_put_cell(net, old_root);
	_leave(" = 0");
	return 0;
}
| 385 | |
/*
 * Update a cell's VL server address list from the DNS.
 *
 * On lookup failure, an empty server list annotated with a status derived
 * from the DNS error is substituted so that /proc and retry logic can see
 * why, and the record is given a short expiry so the lookup is retried.
 * Always bumps dns_lookup_count and wakes its waiters on the way out.
 * Returns 0 or a negative error code.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0, dns_error;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Save the DNS error before ret is repurposed for the
		 * fallback allocation; previously the switch below tested
		 * ret *after* it had been overwritten with -ENOMEM, making
		 * the NOT_FOUND/TEMP_FAILURE classifications unreachable.
		 */
		dns_error = ret;

		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto out_wake;

		switch (dns_error) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	/* Clamp the record's expiry between the min and max TTLs. */
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Transient failure: retry the lookup soon. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	} else {
		/* Ditch the new list if it's not an improvement; previously
		 * it was leaked on this path.
		 */
		old = vllist;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
| 480 | |
/*
 * Destroy a cell record.  Runs as an RCU callback once the last usage ref
 * has been dropped and the record has been unlinked from net->cells.
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);

	_enter("%p{%s}", cell, cell->name);

	ASSERTCMP(atomic_read(&cell->usage), ==, 0);

	/* No readers remain, so the RCU pointer can be taken directly. */
	afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
	key_put(cell->anonymous_key);
	kfree(cell->name);
	kfree(cell);

	_leave(" [destroyed]");
}
| 499 | |
| 500 | /* |
| 501 | * Queue the cell manager. |
| 502 | */ |
| 503 | static void afs_queue_cell_manager(struct afs_net *net) |
| 504 | { |
| 505 | int outstanding = atomic_inc_return(&net->cells_outstanding); |
| 506 | |
| 507 | _enter("%d", outstanding); |
| 508 | |
| 509 | if (!queue_work(afs_wq, &net->cells_manager)) |
| 510 | afs_dec_cells_outstanding(net); |
| 511 | } |
| 512 | |
| 513 | /* |
| 514 | * Cell management timer. We have an increment on cells_outstanding that we |
| 515 | * need to pass along to the work item. |
| 516 | */ |
| 517 | void afs_cells_timer(struct timer_list *timer) |
| 518 | { |
| 519 | struct afs_net *net = container_of(timer, struct afs_net, cells_timer); |
| 520 | |
| 521 | _enter(""); |
| 522 | if (!queue_work(afs_wq, &net->cells_manager)) |
| 523 | afs_dec_cells_outstanding(net); |
| 524 | } |
| 525 | |
/*
 * Get a reference on a cell record.  Returns the cell for convenience.
 * NOTE(review): uses a plain atomic_inc(), so presumably the caller must
 * already hold a ref (cf. atomic_inc_not_zero() in afs_lookup_cell_rcu()).
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell)
{
	atomic_inc(&cell->usage);
	return cell;
}
| 534 | |
/*
 * Drop a reference on a cell record.  When only the tree's ref remains, the
 * cell timer is set so the manager will consider garbage-collecting it; a
 * cell that still has VL servers gets a grace period first.  NULL is
 * tolerated.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
	time64_t now, expire_delay;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	/* NOTE(review): vl_servers is read here without the RCU read lock or
	 * vl_servers_lock; presumably a stale nr_servers only perturbs the
	 * GC delay - confirm.
	 */
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	if (atomic_dec_return(&cell->usage) > 1)
		return;

	/* 'cell' may now be garbage collected. */
	afs_set_cell_timer(net, expire_delay);
}
| 559 | |
| 560 | /* |
| 561 | * Allocate a key to use as a placeholder for anonymous user security. |
| 562 | */ |
| 563 | static int afs_alloc_anon_key(struct afs_cell *cell) |
| 564 | { |
| 565 | struct key *key; |
| 566 | char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp; |
| 567 | |
| 568 | /* Create a key to represent an anonymous user. */ |
| 569 | memcpy(keyname, "afs@", 4); |
| 570 | dp = keyname + 4; |
| 571 | cp = cell->name; |
| 572 | do { |
| 573 | *dp++ = tolower(*cp); |
| 574 | } while (*cp++); |
| 575 | |
| 576 | key = rxrpc_get_null_key(keyname); |
| 577 | if (IS_ERR(key)) |
| 578 | return PTR_ERR(key); |
| 579 | |
| 580 | cell->anonymous_key = key; |
| 581 | |
| 582 | _debug("anon key %p{%x}", |
| 583 | cell->anonymous_key, key_serial(cell->anonymous_key)); |
| 584 | return 0; |
| 585 | } |
| 586 | |
/*
 * Activate a cell: allocate its anonymous key, acquire its cache cookie and
 * publish it in /proc and the dynamic root.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

#ifdef CONFIG_AFS_FSCACHE
	cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
					     &afs_cell_cache_index_def,
					     cell->name, strlen(cell->name),
					     NULL, 0,
					     cell, 0, true);
#endif
	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	/* Insert into the proc_cells list, kept sorted by cell name. */
	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Manual hlist insertion before *p, published with RCU semantics. */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	afs_dynroot_mkdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);
	return 0;
}
| 630 | |
/*
 * Deactivate a cell: remove it from /proc and the dynamic root and
 * relinquish its cache cookie.  Reverses afs_activate_cell().
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}
| 652 | |
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 *
 * State machine: UNSET -> ACTIVATING -> ACTIVE -> DEACTIVATING -> INACTIVE,
 * with FAILED reachable from a failed activation.  Each transition is
 * published with smp_store_release() + wake_up_var() so that waiters in
 * afs_lookup_cell() (which use smp_load_acquire()) observe cell->error
 * before seeing AFS_CELL_FAILED.
 */
static void afs_manage_cell(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	struct afs_net *net = cell->net;
	bool deleted;
	int ret, usage;

	_enter("%s", cell->name);

again:
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_INACTIVE:
	case AFS_CELL_FAILED:
		/* If only the tree's ref remains, atomically claim it and
		 * unlink the cell for destruction; otherwise a FAILED cell
		 * with users is left as-is and an INACTIVE one restarts.
		 */
		write_seqlock(&net->cells_lock);
		usage = 1;
		deleted = atomic_try_cmpxchg_relaxed(&cell->usage, &usage, 0);
		if (deleted)
			rb_erase(&cell->net_node, &net->cells);
		write_sequnlock(&net->cells_lock);
		if (deleted)
			goto final_destruction;
		if (cell->state == AFS_CELL_FAILED)
			goto done;
		smp_store_release(&cell->state, AFS_CELL_UNSET);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_UNSET:
		smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVATING:
		ret = afs_activate_cell(net, cell);
		if (ret < 0)
			goto activation_failed;

		smp_store_release(&cell->state, AFS_CELL_ACTIVE);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_ACTIVE:
		if (atomic_read(&cell->usage) > 1) {
			/* Cell still in use: refresh DNS if requested. */
			if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
				ret = afs_update_cell(cell);
				if (ret < 0)
					cell->error = ret;
			}
			goto done;
		}
		smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
		wake_up_var(&cell->state);
		goto again;

	case AFS_CELL_DEACTIVATING:
		/* Someone may have taken a new ref in the meantime. */
		if (atomic_read(&cell->usage) > 1)
			goto reverse_deactivation;
		afs_deactivate_cell(net, cell);
		smp_store_release(&cell->state, AFS_CELL_INACTIVE);
		wake_up_var(&cell->state);
		goto again;

	default:
		break;
	}
	_debug("bad state %u", cell->state);
	BUG(); /* Unhandled state */

activation_failed:
	cell->error = ret;
	afs_deactivate_cell(net, cell);

	smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
	wake_up_var(&cell->state);
	goto again;

reverse_deactivation:
	smp_store_release(&cell->state, AFS_CELL_ACTIVE);
	wake_up_var(&cell->state);
	_leave(" [deact->act]");
	return;

done:
	_leave(" [done %u]", cell->state);
	return;

final_destruction:
	/* The cell's own deferred free also drops a cells_outstanding count. */
	call_rcu(&cell->rcu, afs_cell_destroy);
	afs_dec_cells_outstanding(net);
	_leave(" [destruct %d]", atomic_read(&net->cells_outstanding));
}
| 749 | |
/*
 * Manage the records of cells known to a network namespace.  This includes
 * updating the DNS records and garbage collecting unused cells that were
 * automatically added.
 *
 * Note that constructed cell records may only be removed from net->cells by
 * this work item, so it is safe for this work item to stash a cursor pointing
 * into the tree and then return to caller (provided it skips cells that are
 * still under construction).
 *
 * Note also that we were given an increment on net->cells_outstanding by
 * whoever queued us that we need to deal with before returning.
 */
void afs_manage_cells(struct work_struct *work)
{
	struct afs_net *net = container_of(work, struct afs_net, cells_manager);
	struct rb_node *cursor;
	time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
	bool purging = !net->live;

	_enter("");

	/* Trawl the cell database looking for cells that have expired from
	 * lack of use and cells whose DNS results have expired and dispatch
	 * their managers.
	 */
	read_seqlock_excl(&net->cells_lock);

	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell =
			rb_entry(cursor, struct afs_cell, net_node);
		unsigned usage;
		bool sched_cell = false;

		usage = atomic_read(&cell->usage);
		_debug("manage %s %u", cell->name, usage);

		ASSERTCMP(usage, >=, 1);

		if (purging) {
			/* Namespace is dying: drop the NO_GC pin so every
			 * cell becomes collectable.
			 */
			if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
				usage = atomic_dec_return(&cell->usage);
			ASSERTCMP(usage, ==, 1);
		}

		if (usage == 1) {
			/* Only the tree holds the cell: check whether its
			 * grace period (if it has servers) has elapsed.
			 */
			struct afs_vlserver_list *vllist;
			time64_t expire_at = cell->last_inactive;

			read_lock(&cell->vl_servers_lock);
			vllist = rcu_dereference_protected(
				cell->vl_servers,
				lockdep_is_held(&cell->vl_servers_lock));
			if (vllist->nr_servers > 0)
				expire_at += afs_cell_gc_delay;
			read_unlock(&cell->vl_servers_lock);
			if (purging || expire_at <= now)
				sched_cell = true;
			else if (expire_at < next_manage)
				next_manage = expire_at;
		}

		if (!purging) {
			/* Kick cells whose DNS records want refreshing. */
			if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
				sched_cell = true;
		}

		if (sched_cell)
			queue_work(afs_wq, &cell->manager);
	}

	read_sequnlock_excl(&net->cells_lock);

	/* Update the timer on the way out.  We have to pass an increment on
	 * cells_outstanding in the namespace that we are in to the timer or
	 * the work scheduler.
	 */
	if (!purging && next_manage < TIME64_MAX) {
		now = ktime_get_real_seconds();

		if (next_manage - now <= 0) {
			if (queue_work(afs_wq, &net->cells_manager))
				atomic_inc(&net->cells_outstanding);
		} else {
			afs_set_cell_timer(net, next_manage - now);
		}
	}

	afs_dec_cells_outstanding(net);
	_leave(" [%d]", atomic_read(&net->cells_outstanding));
}
| 841 | |
/*
 * Purge in-memory cell database.  Drops the workstation cell, stops the
 * cell timer, kicks the manager to collect everything (net->live is false
 * by now) and waits for all outstanding cell work to drain.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;

	_enter("");

	/* Detach the workstation cell and drop its pinning ref. */
	write_seqlock(&net->cells_lock);
	ws = rcu_access_pointer(net->ws_cell);
	RCU_INIT_POINTER(net->ws_cell, NULL);
	write_sequnlock(&net->cells_lock);
	afs_put_cell(net, ws);

	_debug("del timer");
	/* A cancelled pending timer owned a cells_outstanding count. */
	if (del_timer_sync(&net->cells_timer))
		atomic_dec(&net->cells_outstanding);

	_debug("kick mgr");
	afs_queue_cell_manager(net);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}