Update Linux to v5.4.2
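
The security/security.c portion of this update replaces link-order
security_initcall() registration with an ordered list of LSMs built from
CONFIG_LSM (overridable with the "lsm=" boot parameter, traceable with
"lsm.debug"), moves cred/file/inode/ipc/msg_msg/task security state into
framework-managed blobs sized at boot, and switches the LSM notifier to a
blocking chain.

As a rough sketch of the registration model this enables — the names
"example", example_init(), example_hooks and example_blob_sizes are
illustrative only and not part of this diff — a minor LSM now describes
itself with DEFINE_LSM() (declared in include/linux/lsm_hooks.h, not shown
here) and reserves blob space instead of calling security_module_enable():

    #include <linux/lsm_hooks.h>

    /* Space this module wants reserved in the shared cred blob. */
    static struct lsm_blob_sizes example_blob_sizes __lsm_ro_after_init = {
        .lbs_cred = sizeof(u32),
    };

    static int example_cred_prepare(struct cred *new,
                                    const struct cred *old, gfp_t gfp)
    {
        /* The framework has already allocated new->security. */
        return 0;
    }

    static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(cred_prepare, example_cred_prepare),
    };

    static int __init example_init(void)
    {
        security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
                           "example");
        return 0;
    }

    DEFINE_LSM(example) = {
        .name  = "example",
        .init  = example_init,
        .blobs = &example_blob_sizes,
    };

The resulting initialization order can then be chosen at boot, e.g.
"lsm=capability,yama,loadpin,example" (an illustrative ordering), and
adding "lsm.debug" to the kernel command line prints the ordering and
blob-sizing decisions made by ordered_lsm_init().
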
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/security/security.c b/security/security.c
index 736e78d..1bc000f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Security plug functions
*
@@ -5,17 +6,14 @@
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
* Copyright (C) 2016 Mellanox Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
+#define pr_fmt(fmt) "LSM: " fmt
+
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
@@ -28,40 +26,326 @@
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
+#include <linux/msg.h>
#include <net/flow.h>
-#include <trace/events/initcall.h>
-
#define MAX_LSM_EVM_XATTR 2
-/* Maximum number of letters for an LSM name string */
-#define SECURITY_NAME_MAX 10
+/* How many LSMs were built into the kernel? */
+#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
+#define EARLY_LSM_COUNT (__end_early_lsm_info - __start_early_lsm_info)
struct security_hook_heads security_hook_heads __lsm_ro_after_init;
-static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain);
+static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
+
+static struct kmem_cache *lsm_file_cache;
+static struct kmem_cache *lsm_inode_cache;
char *lsm_names;
+static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;
+
/* Boot-time LSM user choice */
-static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
- CONFIG_DEFAULT_SECURITY;
+static __initdata const char *chosen_lsm_order;
+static __initdata const char *chosen_major_lsm;
-static void __init do_security_initcalls(void)
+static __initconst const char * const builtin_lsm_order = CONFIG_LSM;
+
+/* Ordered list of LSMs to initialize. */
+static __initdata struct lsm_info **ordered_lsms;
+static __initdata struct lsm_info *exclusive;
+
+static __initdata bool debug;
+#define init_debug(...) \
+ do { \
+ if (debug) \
+ pr_info(__VA_ARGS__); \
+ } while (0)
+
+static bool __init is_enabled(struct lsm_info *lsm)
{
- int ret;
- initcall_t call;
- initcall_entry_t *ce;
+ if (!lsm->enabled)
+ return false;
- ce = __security_initcall_start;
- trace_initcall_level("security");
- while (ce < __security_initcall_end) {
- call = initcall_from_entry(ce);
- trace_initcall_start(call);
- ret = call();
- trace_initcall_finish(call, ret);
- ce++;
+ return *lsm->enabled;
+}
+
+/* Mark an LSM's enabled flag. */
+static int lsm_enabled_true __initdata = 1;
+static int lsm_enabled_false __initdata = 0;
+static void __init set_enabled(struct lsm_info *lsm, bool enabled)
+{
+ /*
+ * When an LSM hasn't configured an enable variable, we can use
+ * a hard-coded location for storing the default enabled state.
+ */
+ if (!lsm->enabled) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ else
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_true) {
+ if (!enabled)
+ lsm->enabled = &lsm_enabled_false;
+ } else if (lsm->enabled == &lsm_enabled_false) {
+ if (enabled)
+ lsm->enabled = &lsm_enabled_true;
+ } else {
+ *lsm->enabled = enabled;
}
}
+/* Is an LSM already listed in the ordered LSMs list? */
+static bool __init exists_ordered_lsm(struct lsm_info *lsm)
+{
+ struct lsm_info **check;
+
+ for (check = ordered_lsms; *check; check++)
+ if (*check == lsm)
+ return true;
+
+ return false;
+}
+
+/* Append an LSM to the list of ordered LSMs to initialize. */
+static int last_lsm __initdata;
+static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
+{
+ /* Ignore duplicate selections. */
+ if (exists_ordered_lsm(lsm))
+ return;
+
+ if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
+ return;
+
+ /* Enable this LSM, if it is not already set. */
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ ordered_lsms[last_lsm++] = lsm;
+
+ init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
+ is_enabled(lsm) ? "en" : "dis");
+}
+
+/* Is an LSM allowed to be initialized? */
+static bool __init lsm_allowed(struct lsm_info *lsm)
+{
+ /* Skip if the LSM is disabled. */
+ if (!is_enabled(lsm))
+ return false;
+
+ /* Not allowed if another exclusive LSM already initialized. */
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
+ init_debug("exclusive disabled: %s\n", lsm->name);
+ return false;
+ }
+
+ return true;
+}
+
+static void __init lsm_set_blob_size(int *need, int *lbs)
+{
+ int offset;
+
+ if (*need > 0) {
+ offset = *lbs;
+ *lbs += *need;
+ *need = offset;
+ }
+}
+
+static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
+{
+ if (!needed)
+ return;
+
+ lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
+ lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
+ /*
+ * The inode blob gets an rcu_head in addition to
+ * what the modules might need.
+ */
+ if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
+ blob_sizes.lbs_inode = sizeof(struct rcu_head);
+ lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
+ lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
+ lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
+ lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
+}
+
+/* Prepare LSM for initialization. */
+static void __init prepare_lsm(struct lsm_info *lsm)
+{
+ int enabled = lsm_allowed(lsm);
+
+ /* Record enablement (to handle any following exclusive LSMs). */
+ set_enabled(lsm, enabled);
+
+ /* If enabled, do pre-initialization work. */
+ if (enabled) {
+ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
+ exclusive = lsm;
+ init_debug("exclusive chosen: %s\n", lsm->name);
+ }
+
+ lsm_set_blob_sizes(lsm->blobs);
+ }
+}
+
+/* Initialize a given LSM, if it is enabled. */
+static void __init initialize_lsm(struct lsm_info *lsm)
+{
+ if (is_enabled(lsm)) {
+ int ret;
+
+ init_debug("initializing %s\n", lsm->name);
+ ret = lsm->init();
+ WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
+ }
+}
+
+/* Populate ordered LSMs list from comma-separated LSM name list. */
+static void __init ordered_lsm_parse(const char *order, const char *origin)
+{
+ struct lsm_info *lsm;
+ char *sep, *name, *next;
+
+ /* LSM_ORDER_FIRST is always first. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_FIRST)
+ append_ordered_lsm(lsm, "first");
+ }
+
+ /* Process "security=", if given. */
+ if (chosen_major_lsm) {
+ struct lsm_info *major;
+
+ /*
+ * To match the original "security=" behavior, this
+ * explicitly does NOT fall back to another Legacy Major
+ * if the selected one was separately disabled: disable
+ * all non-matching Legacy Major LSMs.
+ */
+ for (major = __start_lsm_info; major < __end_lsm_info;
+ major++) {
+ if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
+ strcmp(major->name, chosen_major_lsm) != 0) {
+ set_enabled(major, false);
+ init_debug("security=%s disabled: %s\n",
+ chosen_major_lsm, major->name);
+ }
+ }
+ }
+
+ sep = kstrdup(order, GFP_KERNEL);
+ next = sep;
+ /* Walk the list, looking for matching LSMs. */
+ while ((name = strsep(&next, ",")) != NULL) {
+ bool found = false;
+
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (lsm->order == LSM_ORDER_MUTABLE &&
+ strcmp(lsm->name, name) == 0) {
+ append_ordered_lsm(lsm, origin);
+ found = true;
+ }
+ }
+
+ if (!found)
+ init_debug("%s ignored: %s\n", origin, name);
+ }
+
+ /* Process "security=", if given. */
+ if (chosen_major_lsm) {
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ if (strcmp(lsm->name, chosen_major_lsm) == 0)
+ append_ordered_lsm(lsm, "security=");
+ }
+ }
+
+ /* Disable all LSMs not in the ordered list. */
+ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
+ if (exists_ordered_lsm(lsm))
+ continue;
+ set_enabled(lsm, false);
+ init_debug("%s disabled: %s\n", origin, lsm->name);
+ }
+
+ kfree(sep);
+}
+
+static void __init lsm_early_cred(struct cred *cred);
+static void __init lsm_early_task(struct task_struct *task);
+
+static int lsm_append(const char *new, char **result);
+
+static void __init ordered_lsm_init(void)
+{
+ struct lsm_info **lsm;
+
+ ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
+ GFP_KERNEL);
+
+ if (chosen_lsm_order) {
+ if (chosen_major_lsm) {
+ pr_info("security= is ignored because it is superseded by lsm=\n");
+ chosen_major_lsm = NULL;
+ }
+ ordered_lsm_parse(chosen_lsm_order, "cmdline");
+ } else
+ ordered_lsm_parse(builtin_lsm_order, "builtin");
+
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ prepare_lsm(*lsm);
+
+ init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
+ init_debug("file blob size = %d\n", blob_sizes.lbs_file);
+ init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
+ init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
+ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
+ init_debug("task blob size = %d\n", blob_sizes.lbs_task);
+
+ /*
+ * Create any kmem_caches needed for blobs
+ */
+ if (blob_sizes.lbs_file)
+ lsm_file_cache = kmem_cache_create("lsm_file_cache",
+ blob_sizes.lbs_file, 0,
+ SLAB_PANIC, NULL);
+ if (blob_sizes.lbs_inode)
+ lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
+ blob_sizes.lbs_inode, 0,
+ SLAB_PANIC, NULL);
+
+ lsm_early_cred((struct cred *) current->cred);
+ lsm_early_task(current);
+ for (lsm = ordered_lsms; *lsm; lsm++)
+ initialize_lsm(*lsm);
+
+ kfree(ordered_lsms);
+}
+
+int __init early_security_init(void)
+{
+ int i;
+ struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
+ struct lsm_info *lsm;
+
+ for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
+ i++)
+ INIT_HLIST_HEAD(&list[i]);
+
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ prepare_lsm(lsm);
+ initialize_lsm(lsm);
+ }
+
+ return 0;
+}
+
/**
* security_init - initializes the security framework
*
@@ -69,36 +353,48 @@
*/
int __init security_init(void)
{
- int i;
- struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
+ struct lsm_info *lsm;
- for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
- i++)
- INIT_HLIST_HEAD(&list[i]);
- pr_info("Security Framework initialized\n");
+ pr_info("Security Framework initializing\n");
/*
- * Load minor LSMs, with the capability module always first.
+ * Append the names of the early LSM modules now that kmalloc() is
+ * available
*/
- capability_add_hooks();
- yama_add_hooks();
- loadpin_add_hooks();
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (lsm->enabled)
+ lsm_append(lsm->name, &lsm_names);
+ }
- /*
- * Load all the remaining security modules.
- */
- do_security_initcalls();
+ /* Load LSMs in specified order. */
+ ordered_lsm_init();
return 0;
}
/* Save user chosen LSM */
-static int __init choose_lsm(char *str)
+static int __init choose_major_lsm(char *str)
{
- strncpy(chosen_lsm, str, SECURITY_NAME_MAX);
+ chosen_major_lsm = str;
return 1;
}
-__setup("security=", choose_lsm);
+__setup("security=", choose_major_lsm);
+
+/* Explicitly choose LSM initialization order. */
+static int __init choose_lsm_order(char *str)
+{
+ chosen_lsm_order = str;
+ return 1;
+}
+__setup("lsm=", choose_lsm_order);
+
+/* Enable LSM order debugging. */
+static int __init enable_debug(char *str)
+{
+ debug = true;
+ return 1;
+}
+__setup("lsm.debug", enable_debug);
static bool match_last_lsm(const char *list, const char *lsm)
{
@@ -115,7 +411,7 @@
return !strcmp(last, lsm);
}
-static int lsm_append(char *new, char **result)
+static int lsm_append(const char *new, char **result)
{
char *cp;
@@ -137,29 +433,6 @@
}
/**
- * security_module_enable - Load given security module on boot ?
- * @module: the name of the module
- *
- * Each LSM must pass this method before registering its own operations
- * to avoid security registration races. This method may also be used
- * to check if your LSM is currently loaded during kernel initialization.
- *
- * Returns:
- *
- * true if:
- *
- * - The passed LSM is the one chosen by user at boot time,
- * - or the passed LSM is configured as the default and the user did not
- * choose an alternate LSM at boot time.
- *
- * Otherwise, return false.
- */
-int __init security_module_enable(const char *module)
-{
- return !strcmp(module, chosen_lsm);
-}
-
-/**
* security_add_hooks - Add a module's hooks to the hook lists.
* @hooks: the hooks to add
* @count: the number of hooks to add
@@ -176,27 +449,192 @@
hooks[i].lsm = lsm;
hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
}
- if (lsm_append(lsm, &lsm_names) < 0)
- panic("%s - Cannot get early memory.\n", __func__);
+
+ /*
+ * Don't try to append during early_security_init(), we'll come back
+ * and fix this up afterwards.
+ */
+ if (slab_is_available()) {
+ if (lsm_append(lsm, &lsm_names) < 0)
+ panic("%s - Cannot get early memory.\n", __func__);
+ }
}
-int call_lsm_notifier(enum lsm_event event, void *data)
+int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
- return atomic_notifier_call_chain(&lsm_notifier_chain, event, data);
+ return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
+ event, data);
}
-EXPORT_SYMBOL(call_lsm_notifier);
+EXPORT_SYMBOL(call_blocking_lsm_notifier);
-int register_lsm_notifier(struct notifier_block *nb)
+int register_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(register_lsm_notifier);
+EXPORT_SYMBOL(register_blocking_lsm_notifier);
-int unregister_lsm_notifier(struct notifier_block *nb)
+int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb);
+ return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
+ nb);
}
-EXPORT_SYMBOL(unregister_lsm_notifier);
+EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
+
+/**
+ * lsm_cred_alloc - allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ * @gfp: allocation type
+ *
+ * Allocate the cred blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
+{
+ if (blob_sizes.lbs_cred == 0) {
+ cred->security = NULL;
+ return 0;
+ }
+
+ cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
+ if (cred->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_cred - during initialization allocate a composite cred blob
+ * @cred: the cred that needs a blob
+ *
+ * Allocate the cred blob for all the modules
+ */
+static void __init lsm_early_cred(struct cred *cred)
+{
+ int rc = lsm_cred_alloc(cred, GFP_KERNEL);
+
+ if (rc)
+ panic("%s: Early cred alloc failed.\n", __func__);
+}
+
+/**
+ * lsm_file_alloc - allocate a composite file blob
+ * @file: the file that needs a blob
+ *
+ * Allocate the file blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_file_alloc(struct file *file)
+{
+ if (!lsm_file_cache) {
+ file->f_security = NULL;
+ return 0;
+ }
+
+ file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
+ if (file->f_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_inode_alloc - allocate a composite inode blob
+ * @inode: the inode that needs a blob
+ *
+ * Allocate the inode blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+int lsm_inode_alloc(struct inode *inode)
+{
+ if (!lsm_inode_cache) {
+ inode->i_security = NULL;
+ return 0;
+ }
+
+ inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
+ if (inode->i_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_task_alloc - allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_task_alloc(struct task_struct *task)
+{
+ if (blob_sizes.lbs_task == 0) {
+ task->security = NULL;
+ return 0;
+ }
+
+ task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
+ if (task->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_ipc_alloc - allocate a composite ipc blob
+ * @kip: the ipc that needs a blob
+ *
+ * Allocate the ipc blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
+{
+ if (blob_sizes.lbs_ipc == 0) {
+ kip->security = NULL;
+ return 0;
+ }
+
+ kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
+ if (kip->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_msg_msg_alloc - allocate a composite msg_msg blob
+ * @mp: the msg_msg that needs a blob
+ *
+ * Allocate the msg_msg blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_msg_msg_alloc(struct msg_msg *mp)
+{
+ if (blob_sizes.lbs_msg_msg == 0) {
+ mp->security = NULL;
+ return 0;
+ }
+
+ mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
+ if (mp->security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * lsm_early_task - during initialization allocate a composite task blob
+ * @task: the task that needs a blob
+ *
+ * Allocate the task blob for all the modules
+ */
+static void __init lsm_early_task(struct task_struct *task)
+{
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ panic("%s: Early task alloc failed.\n", __func__);
+}
/*
* Hook list operation macros.
@@ -283,16 +721,12 @@
effective, inheritable, permitted);
}
-int security_capable(const struct cred *cred, struct user_namespace *ns,
- int cap)
+int security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ unsigned int opts)
{
- return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_AUDIT);
-}
-
-int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
- int cap)
-{
- return call_int_hook(capable, 0, cred, ns, cap, SECURITY_CAP_NOAUDIT);
+ return call_int_hook(capable, 0, cred, ns, cap, opts);
}
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -363,6 +797,16 @@
call_void_hook(bprm_committed_creds, bprm);
}
+int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ return call_int_hook(fs_context_dup, 0, fc, src_fc);
+}
+
+int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param);
+}
+
int security_sb_alloc(struct super_block *sb)
{
return call_int_hook(sb_alloc_security, 0, sb);
@@ -373,20 +817,31 @@
call_void_hook(sb_free_security, sb);
}
-int security_sb_copy_data(char *orig, char *copy)
+void security_free_mnt_opts(void **mnt_opts)
{
- return call_int_hook(sb_copy_data, 0, orig, copy);
+ if (!*mnt_opts)
+ return;
+ call_void_hook(sb_free_mnt_opts, *mnt_opts);
+ *mnt_opts = NULL;
}
-EXPORT_SYMBOL(security_sb_copy_data);
+EXPORT_SYMBOL(security_free_mnt_opts);
-int security_sb_remount(struct super_block *sb, void *data)
+int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
{
- return call_int_hook(sb_remount, 0, sb, data);
+ return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
}
+EXPORT_SYMBOL(security_sb_eat_lsm_opts);
-int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
+int security_sb_remount(struct super_block *sb,
+ void *mnt_opts)
{
- return call_int_hook(sb_kern_mount, 0, sb, flags, data);
+ return call_int_hook(sb_remount, 0, sb, mnt_opts);
+}
+EXPORT_SYMBOL(security_sb_remount);
+
+int security_sb_kern_mount(struct super_block *sb)
+{
+ return call_int_hook(sb_kern_mount, 0, sb);
}
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
@@ -416,13 +871,13 @@
}
int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts,
+ void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return call_int_hook(sb_set_mnt_opts,
- opts->num_mnt_opts ? -EOPNOTSUPP : 0, sb,
- opts, kern_flags, set_kern_flags);
+ mnt_opts ? -EOPNOTSUPP : 0, sb,
+ mnt_opts, kern_flags, set_kern_flags);
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
@@ -436,22 +891,61 @@
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
-int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+int security_add_mnt_opt(const char *option, const char *val, int len,
+ void **mnt_opts)
{
- return call_int_hook(sb_parse_opts_str, 0, options, opts);
+ return call_int_hook(sb_add_mnt_opt, -EINVAL,
+ option, val, len, mnt_opts);
}
-EXPORT_SYMBOL(security_sb_parse_opts_str);
+EXPORT_SYMBOL(security_add_mnt_opt);
+
+int security_move_mount(const struct path *from_path, const struct path *to_path)
+{
+ return call_int_hook(move_mount, 0, from_path, to_path);
+}
+
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ return call_int_hook(path_notify, 0, path, mask, obj_type);
+}
int security_inode_alloc(struct inode *inode)
{
- inode->i_security = NULL;
- return call_int_hook(inode_alloc_security, 0, inode);
+ int rc = lsm_inode_alloc(inode);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(inode_alloc_security, 0, inode);
+ if (unlikely(rc))
+ security_inode_free(inode);
+ return rc;
+}
+
+static void inode_free_by_rcu(struct rcu_head *head)
+{
+ /*
+ * The rcu head is at the start of the inode blob
+ */
+ kmem_cache_free(lsm_inode_cache, head);
}
void security_inode_free(struct inode *inode)
{
integrity_inode_free(inode);
call_void_hook(inode_free_security, inode);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to security_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ if (inode->i_security)
+ call_rcu((struct rcu_head *)inode->i_security,
+ inode_free_by_rcu);
}
int security_dentry_init_security(struct dentry *dentry, int mode,
@@ -868,6 +1362,12 @@
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);
+int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+{
+ return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
+}
+
int security_file_permission(struct file *file, int mask)
{
int ret;
@@ -881,12 +1381,27 @@
int security_file_alloc(struct file *file)
{
- return call_int_hook(file_alloc_security, 0, file);
+ int rc = lsm_file_alloc(file);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(file_alloc_security, 0, file);
+ if (unlikely(rc))
+ security_file_free(file);
+ return rc;
}
void security_file_free(struct file *file)
{
+ void *blob;
+
call_void_hook(file_free_security, file);
+
+ blob = file->f_security;
+ if (blob) {
+ file->f_security = NULL;
+ kmem_cache_free(lsm_file_cache, blob);
+ }
}
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -988,27 +1503,63 @@
int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
- return call_int_hook(task_alloc, 0, task, clone_flags);
+ int rc = lsm_task_alloc(task);
+
+ if (rc)
+ return rc;
+ rc = call_int_hook(task_alloc, 0, task, clone_flags);
+ if (unlikely(rc))
+ security_task_free(task);
+ return rc;
}
void security_task_free(struct task_struct *task)
{
call_void_hook(task_free, task);
+
+ kfree(task->security);
+ task->security = NULL;
}
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- return call_int_hook(cred_alloc_blank, 0, cred, gfp);
+ int rc = lsm_cred_alloc(cred, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
+ if (unlikely(rc))
+ security_cred_free(cred);
+ return rc;
}
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
+
+ kfree(cred->security);
+ cred->security = NULL;
}
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
{
- return call_int_hook(cred_prepare, 0, new, old, gfp);
+ int rc = lsm_cred_alloc(new, gfp);
+
+ if (rc)
+ return rc;
+
+ rc = call_int_hook(cred_prepare, 0, new, old, gfp);
+ if (unlikely(rc))
+ security_cred_free(new);
+ return rc;
}
void security_transfer_creds(struct cred *new, const struct cred *old)
@@ -1147,7 +1698,7 @@
return call_int_hook(task_movememory, 0, p);
}
-int security_task_kill(struct task_struct *p, struct siginfo *info,
+int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
return call_int_hook(task_kill, 0, p, info, sig, cred);
@@ -1189,22 +1740,40 @@
int security_msg_msg_alloc(struct msg_msg *msg)
{
- return call_int_hook(msg_msg_alloc_security, 0, msg);
+ int rc = lsm_msg_msg_alloc(msg);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_msg_alloc_security, 0, msg);
+ if (unlikely(rc))
+ security_msg_msg_free(msg);
+ return rc;
}
void security_msg_msg_free(struct msg_msg *msg)
{
call_void_hook(msg_msg_free_security, msg);
+ kfree(msg->security);
+ msg->security = NULL;
}
int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
- return call_int_hook(msg_queue_alloc_security, 0, msq);
+ int rc = lsm_ipc_alloc(msq);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(msg_queue_alloc_security, 0, msq);
+ if (unlikely(rc))
+ security_msg_queue_free(msq);
+ return rc;
}
void security_msg_queue_free(struct kern_ipc_perm *msq)
{
call_void_hook(msg_queue_free_security, msq);
+ kfree(msq->security);
+ msq->security = NULL;
}
int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
@@ -1231,12 +1800,21 @@
int security_shm_alloc(struct kern_ipc_perm *shp)
{
- return call_int_hook(shm_alloc_security, 0, shp);
+ int rc = lsm_ipc_alloc(shp);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(shm_alloc_security, 0, shp);
+ if (unlikely(rc))
+ security_shm_free(shp);
+ return rc;
}
void security_shm_free(struct kern_ipc_perm *shp)
{
call_void_hook(shm_free_security, shp);
+ kfree(shp->security);
+ shp->security = NULL;
}
int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
@@ -1256,12 +1834,21 @@
int security_sem_alloc(struct kern_ipc_perm *sma)
{
- return call_int_hook(sem_alloc_security, 0, sma);
+ int rc = lsm_ipc_alloc(sma);
+
+ if (unlikely(rc))
+ return rc;
+ rc = call_int_hook(sem_alloc_security, 0, sma);
+ if (unlikely(rc))
+ security_sem_free(sma);
+ return rc;
}
void security_sem_free(struct kern_ipc_perm *sma)
{
call_void_hook(sem_free_security, sma);
+ kfree(sma->security);
+ sma->security = NULL;
}
int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
@@ -1288,14 +1875,30 @@
}
EXPORT_SYMBOL(security_d_instantiate);
-int security_getprocattr(struct task_struct *p, char *name, char **value)
+int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+ char **value)
{
- return call_int_hook(getprocattr, -EINVAL, p, name, value);
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.getprocattr(p, name, value);
+ }
+ return -EINVAL;
}
-int security_setprocattr(const char *name, void *value, size_t size)
+int security_setprocattr(const char *lsm, const char *name, void *value,
+ size_t size)
{
- return call_int_hook(setprocattr, -EINVAL, name, value, size);
+ struct security_hook_list *hp;
+
+ hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
+ if (lsm != NULL && strcmp(lsm, hp->lsm))
+ continue;
+ return hp->hook.setprocattr(name, value, size);
+ }
+ return -EINVAL;
}
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
@@ -1759,11 +2362,9 @@
call_void_hook(audit_rule_free, lsmrule);
}
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
- struct audit_context *actx)
+int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
{
- return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule,
- actx);
+ return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */
@@ -1797,3 +2398,9 @@
call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */
+
+int security_locked_down(enum lockdown_reason what)
+{
+ return call_int_hook(locked_down, 0, what);
+}
+EXPORT_SYMBOL(security_locked_down);