// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/proc/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/cache.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>

#include <linux/uaccess.h>

#include "internal.h"

static void proc_evict_inode(struct inode *inode)
{
	struct proc_dir_entry *de;
	struct ctl_table_header *head;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PDE(inode);
	if (de)
		pde_put(de);

	head = PROC_I(inode)->sysctl;
	if (head) {
		RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
		proc_sys_evict_inode(inode, head);
	}
}

static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;

	ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	ei->ns_ops = NULL;
	return &ei->vfs_inode;
}

static void proc_free_inode(struct inode *inode)
{
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void __init proc_init_kmemcache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT|
						  SLAB_PANIC),
					      init_once);
	pde_opener_cache =
		kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
				  SLAB_ACCOUNT|SLAB_PANIC, NULL);
	proc_dir_entry_cache = kmem_cache_create_usercopy(
		"proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
		offsetof(struct proc_dir_entry, inline_name),
		SIZEOF_PDE_INLINE_NAME, NULL);
	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}
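
/*
 * Illustrative note (not part of the original file): proc_dir_entry_cache is
 * a hardened-usercopy cache, so only the inline_name region may be copied to
 * or from user space.  A minimal sketch of the same pattern for a
 * hypothetical structure, assuming only the documented
 * kmem_cache_create_usercopy(name, size, align, flags, useroffset, usersize,
 * ctor) signature; "struct foo" and foo_cache are made-up names:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char name[64];
 *	};
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_PANIC,
 *				offsetof(struct foo, name),
 *				sizeof(((struct foo *)0)->name), NULL);
 *
 * With this whitelist, copy_to_user()/copy_from_user() on a foo object is
 * only allowed to touch foo::name; anything else trips the usercopy checks.
 */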

static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct pid_namespace *pid = sb->s_fs_info;

	if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
	if (pid->hide_pid != HIDEPID_OFF)
		seq_printf(seq, ",hidepid=%u", pid->hide_pid);

	return 0;
}

const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.free_inode	= proc_free_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.show_options	= proc_show_options,
};

enum {BIAS = -1U<<31};

static inline int use_pde(struct proc_dir_entry *pde)
{
	return likely(atomic_inc_unless_negative(&pde->in_use));
}

static void unuse_pde(struct proc_dir_entry *pde)
{
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}
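
/*
 * Illustrative note (not part of the original file): ->in_use is a biased
 * reference count.  While the entry is live it holds small non-negative
 * values, so atomic_inc_unless_negative() in use_pde() succeeds.  Removal
 * adds BIAS (the most negative int), which both rejects new users and lets
 * the remover detect the last exit.  A rough trace with two concurrent
 * users:
 *
 *	use_pde()              in_use: 0 -> 1
 *	use_pde()              in_use: 1 -> 2
 *	proc_entry_rundown()   in_use: 2 -> BIAS + 2   (new use_pde() fails)
 *	unuse_pde()            in_use: BIAS + 2 -> BIAS + 1
 *	unuse_pde()            in_use: BIAS + 1 -> BIAS, complete() the remover
 */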

/* pde is locked on entry, unlocked on exit */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
	/*
	 * close() (proc_reg_release()) can't delete an entry and proceed:
	 * ->release hook needs to be available at the right moment.
	 *
	 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
	 * "struct file" needs to be available at the right moment.
	 *
	 * Therefore, first process to enter this function does ->release() and
	 * signals its completion to the other process which does nothing.
	 */
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
	} else {
		struct file *file;
		struct completion *c;

		pdeo->closing = true;
		spin_unlock(&pde->pde_unload_lock);
		file = pdeo->file;
		pde->proc_fops->release(file_inode(file), file);
		spin_lock(&pde->pde_unload_lock);
		/* After ->release. */
		list_del(&pdeo->lh);
		c = pdeo->c;
		spin_unlock(&pde->pde_unload_lock);
		if (unlikely(c))
			complete(c);
		kmem_cache_free(pde_opener_cache, pdeo);
	}
}
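
/*
 * Illustrative note (not part of the original file): the two interleavings
 * close_pdeo() serialises, roughly:
 *
 *	CPU0 (proc_reg_release)            CPU1 (proc_entry_rundown)
 *	close_pdeo(): closing was false
 *	set closing, drop lock
 *	call ->release()                   close_pdeo(): closing is true
 *	                                   set pdeo->c, drop lock, wait
 *	retake lock, list_del(), read c
 *	drop lock, complete(c), free pdeo  woken up; must not touch pdeo
 *
 * Whichever side enters first performs the single ->release() call and frees
 * the pde_opener; the other side only waits for that to finish.
 */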

void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);

	/* ->pde_openers list can't grow from now on. */

	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		close_pdeo(de, pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	loff_t rv = -EINVAL;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, llseek) llseek;

		llseek = pde->proc_fops->llseek;
		if (!llseek)
			llseek = default_llseek;
		rv = llseek(file, offset, whence);
		unuse_pde(pde);
	}
	return rv;
}

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, read) read;

		read = pde->proc_fops->read;
		if (read)
			rv = read(file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, write) write;

		write = pde->proc_fops->write;
		if (write)
			rv = write(file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}

static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	__poll_t rv = DEFAULT_POLLMASK;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, poll) poll;

		poll = pde->proc_fops->poll;
		if (poll)
			rv = poll(file, pts);
		unuse_pde(pde);
	}
	return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, unlocked_ioctl) ioctl;

		ioctl = pde->proc_fops->unlocked_ioctl;
		if (ioctl)
			rv = ioctl(file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, compat_ioctl) compat_ioctl;

		compat_ioctl = pde->proc_fops->compat_ioctl;
		if (compat_ioctl)
			rv = compat_ioctl(file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	int rv = -EIO;
	if (use_pde(pde)) {
		typeof_member(struct file_operations, mmap) mmap;

		mmap = pde->proc_fops->mmap;
		if (mmap)
			rv = mmap(file, vma);
		unuse_pde(pde);
	}
	return rv;
}

static unsigned long
proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
			   unsigned long len, unsigned long pgoff,
			   unsigned long flags)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	unsigned long rv = -EIO;

	if (use_pde(pde)) {
		typeof_member(struct file_operations, get_unmapped_area) get_area;

		get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
		if (!get_area)
			get_area = current->mm->get_unmapped_area;
#endif

		if (get_area)
			rv = get_area(file, orig_addr, len, pgoff, flags);
		else
			rv = orig_addr;
		unuse_pde(pde);
	}
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	typeof_member(struct file_operations, open) open;
	typeof_member(struct file_operations, release) release;
	struct pde_opener *pdeo;

	/*
	 * Ensure that
	 * 1) PDE's ->release hook will be called no matter what
	 *    either normally by close()/->release, or forcefully by
	 *    rmmod/remove_proc_entry.
	 *
	 * 2) rmmod isn't blocked by opening file in /proc and sitting on
	 *    the descriptor (including "rmmod foo </proc/foo" scenario).
	 *
	 * Save every "struct file" with custom ->release hook.
	 */
	if (!use_pde(pde))
		return -ENOENT;

	release = pde->proc_fops->release;
	if (release) {
		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
		if (!pdeo) {
			rv = -ENOMEM;
			goto out_unuse;
		}
	}

	open = pde->proc_fops->open;
	if (open)
		rv = open(inode, file);

	if (release) {
		if (rv == 0) {
			/* To know what to release. */
			pdeo->file = file;
			pdeo->closing = false;
			pdeo->c = NULL;
			spin_lock(&pde->pde_unload_lock);
			list_add(&pdeo->lh, &pde->pde_openers);
			spin_unlock(&pde->pde_unload_lock);
		} else
			kmem_cache_free(pde_opener_cache, pdeo);
	}

out_unuse:
	unuse_pde(pde);
	return rv;
}
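
/*
 * Illustrative note (not part of the original file): the bookkeeping above is
 * what keeps a pipeline such as
 *
 *	rmmod foo </proc/foo
 *
 * from deadlocking.  remove_proc_entry() does not need to wait for the open
 * descriptor to go away: proc_entry_rundown() can call the module's
 * ->release() itself through the saved pde_opener, and the descriptor's
 * eventual close() then finds nothing left in ->pde_openers and returns.
 */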

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;
	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			close_pdeo(pde, pdeo);
			return 0;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}

static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif
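
/*
 * Illustrative sketch (not part of the original file): a minimal user of the
 * wrappers above.  It assumes the proc_create() variant matching this file's
 * use of ->proc_fops, i.e. the one that still takes a file_operations
 * pointer; "example" and example_fops are made-up names.
 *
 *	static ssize_t example_read(struct file *file, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		static const char msg[] = "hello\n";
 *
 *		return simple_read_from_buffer(buf, count, ppos, msg,
 *					       sizeof(msg) - 1);
 *	}
 *
 *	static const struct file_operations example_fops = {
 *		.owner	= THIS_MODULE,
 *		.read	= example_read,
 *		.llseek	= default_llseek,
 *	};
 *
 *	proc_create("example", 0444, NULL, &example_fops);
 *
 * proc_get_inode() below installs proc_reg_file_ops (or the no-compat
 * variant) as inode->i_fop, so every VFS call takes a use_pde()/unuse_pde()
 * pair before example_fops is consulted, and example_read() can never run
 * against an entry that remove_proc_entry() has already torn down.
 */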

static void proc_put_link(void *p)
{
	unuse_pde(p);
}

static const char *proc_get_link(struct dentry *dentry,
				 struct inode *inode,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
}

const struct inode_operations proc_link_inode_operations = {
	.get_link	= proc_get_link,
};
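
/*
 * Illustrative note (not part of the original file): these inode operations
 * back entries created with proc_symlink(), for example
 *
 *	proc_symlink("mounts", NULL, "self/mounts");
 *
 * where the target string is kept in pde->data.  proc_get_link() pins the
 * entry with use_pde() and hands that string to the VFS; proc_put_link()
 * drops the pin once the path walk no longer needs it.
 */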

struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = de->low_ino;
		inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
		PROC_I(inode)->pde = de;

		if (is_empty_pde(de)) {
			make_empty_dir_inode(inode);
			return inode;
		}
		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			set_nlink(inode, de->nlink);
		WARN_ON(!de->proc_iops);
		inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
	} else
		pde_put(de);
	return inode;
}