David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | /* |
| 3 | * include/linux/sunrpc/cache.h |
| 4 | * |
| 5 | * Generic code for various authentication-related caches |
| 6 | * used by sunrpc clients and servers. |
| 7 | * |
| 8 | * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9 | */ |
| 10 | |
| 11 | #ifndef _LINUX_SUNRPC_CACHE_H_ |
| 12 | #define _LINUX_SUNRPC_CACHE_H_ |
| 13 | |
| 14 | #include <linux/kref.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/atomic.h> |
| 17 | #include <linux/proc_fs.h> |
| 18 | |
| 19 | /* |
| 20 | * Each cache requires: |
| 21 | * - A 'struct cache_detail' which contains information specific to the cache |
| 22 | * for common code to use. |
| 23 | * - An item structure that must contain a "struct cache_head" |
| 24 | * - A lookup function defined using DefineCacheLookup |
 * - A 'put' function that can release a cache item. It will only
 *   be called after cache_put has succeeded, so there are guaranteed
 *   to be no references.
| 28 | * - A function to calculate a hash of an item's key. |
| 29 | * |
| 30 | * as well as assorted code fragments (e.g. compare keys) and numbers |
| 31 | * (e.g. hash size, goal_age, etc). |
| 32 | * |
| 33 | * Each cache must be registered so that it can be cleaned regularly. |
| 34 | * When the cache is unregistered, it is flushed completely. |
| 35 | * |
| 36 | * Entries have a ref count and a 'hashed' flag which counts the existence |
| 37 | * in the hash table. |
| 38 | * We only expire entries when refcount is zero. |
 * Existence in the cache is counted in the refcount.
| 40 | */ |
| 41 | |
| 42 | /* Every cache item has a common header that is used |
| 43 | * for expiring and refreshing entries. |
| 44 | * |
| 45 | */ |
struct cache_head {
	struct hlist_node	cache_list;	/* membership in a cache_detail hash chain */
	time64_t	expiry_time;	/* After time expiry_time, don't use
					 * the data */
	time64_t	last_refresh;   /* If CACHE_PENDING, this is when upcall was
					 * sent, else this is when update was
					 * received, though it is always set to
					 * be *after* ->flush_time.
					 */
	struct kref	ref;		/* released via cache_detail->cache_put */
	unsigned long	flags;		/* CACHE_* bit numbers defined below */
};
| 58 | #define CACHE_VALID 0 /* Entry contains valid data */ |
| 59 | #define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */ |
| 60 | #define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/ |
| 61 | #define CACHE_CLEANED 3 /* Entry has been cleaned from cache */ |
| 62 | |
| 63 | #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ |
| 64 | |
struct cache_detail {
	struct module *		owner;
	int			hash_size;
	struct hlist_head *	hash_table;
	spinlock_t		hash_lock;	/* protects hash_table and per-item list linkage */

	char			*name;
	/* Final release of a cache item; passed to kref_put() by cache_put(). */
	void			(*cache_put)(struct kref *);

	/* Send an upcall for an item that needs (re)filling. */
	int			(*cache_upcall)(struct cache_detail *,
						struct cache_head *);

	/* Format an upcall request for an item into *bpp (length *blen). */
	void			(*cache_request)(struct cache_detail *cd,
						 struct cache_head *ch,
						 char **bpp, int *blen);

	/* Parse a reply/update written into the channel. */
	int			(*cache_parse)(struct cache_detail *,
					       char *buf, int len);

	/* Show one entry (h), or a header when h is NULL, in /proc content files. */
	int			(*cache_show)(struct seq_file *m,
					      struct cache_detail *cd,
					      struct cache_head *h);
	void			(*warn_no_listener)(struct cache_detail *cd,
						    int has_died);

	struct cache_head *	(*alloc)(void);
	void			(*flush)(void);
	/* Key comparison / key copy / content copy between two items. */
	int			(*match)(struct cache_head *orig, struct cache_head *new);
	void			(*init)(struct cache_head *orig, struct cache_head *new);
	void			(*update)(struct cache_head *orig, struct cache_head *new);

	/* fields below this comment are for internal use
	 * and should not be touched by cache owners
	 */
	time64_t		flush_time;		/* flush all cache items with
							 * last_refresh at or earlier
							 * than this.  last_refresh
							 * is never set at or earlier
							 * than this.
							 */
	struct list_head	others;
	time64_t		nextcheck;	/* earliest time the cleaner needs to revisit */
	int			entries;
	/* fields for communication over channel */
	struct list_head	queue;

	atomic_t		writers;	/* how many times is /channel open for write */
	time64_t		last_close;	/* if no writers, when did last close */
	time64_t		last_warn;	/* when we last warned about no writers */

	union {
		struct proc_dir_entry	*procfs;
		struct dentry		*pipefs;
	};
	struct net		*net;
};
| 122 | |
| 123 | |
| 124 | /* this must be embedded in any request structure that |
| 125 | * identifies an object that will want a callback on |
| 126 | * a cache fill |
| 127 | */ |
struct cache_req {
	/* Create a deferred request to be revisited when the cache item
	 * is filled; see struct cache_deferred_req below. */
	struct cache_deferred_req *(*defer)(struct cache_req *req);
	int thread_wait;  /* How long (jiffies) we can block the
			   * current thread to wait for updates.
			   */
};
| 134 | /* this must be embedded in a deferred_request that is being |
| 135 | * delayed awaiting cache-fill |
| 136 | */ |
struct cache_deferred_req {
	struct hlist_node	hash;	/* on hash chain */
	struct list_head	recent; /* on fifo */
	struct cache_head	*item;  /* cache item we wait on */
	void			*owner; /* we might need to discard all deferred requests
					 * owned by someone */
	/* Called when the item is updated or the deferral must be dropped;
	 * too_many indicates the deferral queue overflowed. */
	void			(*revisit)(struct cache_deferred_req *req,
					   int too_many);
};
| 146 | |
| 147 | /* |
| 148 | * timestamps kept in the cache are expressed in seconds |
| 149 | * since boot. This is the best for measuring differences in |
| 150 | * real time. |
 * This reimplements ktime_get_boottime_seconds() in a slightly
| 152 | * faster but less accurate way. When we end up converting |
| 153 | * back to wallclock (CLOCK_REALTIME), that error often |
| 154 | * cancels out during the reverse operation. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 155 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 156 | static inline time64_t seconds_since_boot(void) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 157 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 158 | struct timespec64 boot; |
| 159 | getboottime64(&boot); |
| 160 | return ktime_get_real_seconds() - boot.tv_sec; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 161 | } |
| 162 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 163 | static inline time64_t convert_to_wallclock(time64_t sinceboot) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 164 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 165 | struct timespec64 boot; |
| 166 | getboottime64(&boot); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 167 | return boot.tv_sec + sinceboot; |
| 168 | } |
| 169 | |
| 170 | extern const struct file_operations cache_file_operations_pipefs; |
| 171 | extern const struct file_operations content_file_operations_pipefs; |
| 172 | extern const struct file_operations cache_flush_operations_pipefs; |
| 173 | |
| 174 | extern struct cache_head * |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 175 | sunrpc_cache_lookup_rcu(struct cache_detail *detail, |
| 176 | struct cache_head *key, int hash); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 177 | extern struct cache_head * |
| 178 | sunrpc_cache_update(struct cache_detail *detail, |
| 179 | struct cache_head *new, struct cache_head *old, int hash); |
| 180 | |
| 181 | extern int |
| 182 | sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 183 | extern int |
| 184 | sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, |
| 185 | struct cache_head *h); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 186 | |
| 187 | |
| 188 | extern void cache_clean_deferred(void *owner); |
| 189 | |
/* Take an extra reference on a cache item and return it, so the call
 * can be chained (e.g. return cache_get(h);). */
static inline struct cache_head *cache_get(struct cache_head *h)
{
	kref_get(&h->ref);
	return h;
}
| 195 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 196 | static inline struct cache_head *cache_get_rcu(struct cache_head *h) |
| 197 | { |
| 198 | if (kref_get_unless_zero(&h->ref)) |
| 199 | return h; |
| 200 | return NULL; |
| 201 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 202 | |
/* Drop a reference on a cache item. */
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
	/* If few references remain (a count of 2 here presumably means the
	 * caller's reference plus the hash table's — confirm against the
	 * cache core), pull the cache's next scan time forward so the
	 * cleaner notices this item's expiry promptly. */
	if (kref_read(&h->ref) <= 2 &&
	    h->expiry_time < cd->nextcheck)
		cd->nextcheck = h->expiry_time;
	kref_put(&h->ref, cd->cache_put);
}
| 210 | |
| 211 | static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h) |
| 212 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 213 | if (h->expiry_time < seconds_since_boot()) |
| 214 | return true; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 215 | if (!test_bit(CACHE_VALID, &h->flags)) |
| 216 | return false; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 217 | return detail->flush_time >= h->last_refresh; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 218 | } |
| 219 | |
| 220 | extern int cache_check(struct cache_detail *detail, |
| 221 | struct cache_head *h, struct cache_req *rqstp); |
| 222 | extern void cache_flush(void); |
| 223 | extern void cache_purge(struct cache_detail *detail); |
| 224 | #define NEVER (0x7FFFFFFF) |
| 225 | extern void __init cache_initialize(void); |
| 226 | extern int cache_register_net(struct cache_detail *cd, struct net *net); |
| 227 | extern void cache_unregister_net(struct cache_detail *cd, struct net *net); |
| 228 | |
| 229 | extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net); |
| 230 | extern void cache_destroy_net(struct cache_detail *cd, struct net *net); |
| 231 | |
| 232 | extern void sunrpc_init_cache_detail(struct cache_detail *cd); |
| 233 | extern void sunrpc_destroy_cache_detail(struct cache_detail *cd); |
| 234 | extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *, |
| 235 | umode_t, struct cache_detail *); |
| 236 | extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); |
| 237 | extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *); |
| 238 | |
| 239 | /* Must store cache_detail in seq_file->private if using next three functions */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 240 | extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos); |
| 241 | extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos); |
| 242 | extern void cache_seq_stop_rcu(struct seq_file *file, void *p); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 243 | |
| 244 | extern void qword_add(char **bpp, int *lp, char *str); |
| 245 | extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); |
| 246 | extern int qword_get(char **bpp, char *dest, int bufsize); |
| 247 | |
| 248 | static inline int get_int(char **bpp, int *anint) |
| 249 | { |
| 250 | char buf[50]; |
| 251 | char *ep; |
| 252 | int rv; |
| 253 | int len = qword_get(bpp, buf, sizeof(buf)); |
| 254 | |
| 255 | if (len < 0) |
| 256 | return -EINVAL; |
| 257 | if (len == 0) |
| 258 | return -ENOENT; |
| 259 | |
| 260 | rv = simple_strtol(buf, &ep, 0); |
| 261 | if (*ep) |
| 262 | return -EINVAL; |
| 263 | |
| 264 | *anint = rv; |
| 265 | return 0; |
| 266 | } |
| 267 | |
| 268 | static inline int get_uint(char **bpp, unsigned int *anint) |
| 269 | { |
| 270 | char buf[50]; |
| 271 | int len = qword_get(bpp, buf, sizeof(buf)); |
| 272 | |
| 273 | if (len < 0) |
| 274 | return -EINVAL; |
| 275 | if (len == 0) |
| 276 | return -ENOENT; |
| 277 | |
| 278 | if (kstrtouint(buf, 0, anint)) |
| 279 | return -EINVAL; |
| 280 | |
| 281 | return 0; |
| 282 | } |
| 283 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 284 | static inline int get_time(char **bpp, time64_t *time) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 285 | { |
| 286 | char buf[50]; |
| 287 | long long ll; |
| 288 | int len = qword_get(bpp, buf, sizeof(buf)); |
| 289 | |
| 290 | if (len < 0) |
| 291 | return -EINVAL; |
| 292 | if (len == 0) |
| 293 | return -ENOENT; |
| 294 | |
| 295 | if (kstrtoll(buf, 0, &ll)) |
| 296 | return -EINVAL; |
| 297 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 298 | *time = ll; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 299 | return 0; |
| 300 | } |
| 301 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 302 | static inline time64_t get_expiry(char **bpp) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 303 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 304 | time64_t rv; |
| 305 | struct timespec64 boot; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 306 | |
| 307 | if (get_time(bpp, &rv)) |
| 308 | return 0; |
| 309 | if (rv < 0) |
| 310 | return 0; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 311 | getboottime64(&boot); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 312 | return rv - boot.tv_sec; |
| 313 | } |
| 314 | |
| 315 | #endif /* _LINUX_SUNRPC_CACHE_H_ */ |