author     Linus Torvalds <torvalds@linux-foundation.org>  2016-10-15 10:03:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-10-15 10:03:15 -0700
commit     9ffc66941df278c9f4df979b6bcf6c6ddafedd16 (patch)
tree       a2cff20aafb7ecb352a0c2dd41a5430f64a248e0
parent     133d970e0dadf7b413db19893acc5b26664bf4a1 (diff)
parent     0766f788eb727e2e330d55d30545db65bcf2623f (diff)
download   linux-stable-9ffc66941df278c9f4df979b6bcf6c6ddafedd16.tar.gz
           linux-stable-9ffc66941df278c9f4df979b6bcf6c6ddafedd16.tar.bz2
           linux-stable-9ffc66941df278c9f4df979b6bcf6c6ddafedd16.zip
Merge tag 'gcc-plugins-v4.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull gcc plugins update from Kees Cook:
 "This adds a new gcc plugin named "latent_entropy". It is designed to
  extract as much uncertainty from a running system at boot time as
  possible, hoping to capitalize on any possible variation in CPU
  operation (due to runtime data differences, hardware differences, SMP
  ordering, thermal timing variation, cache behavior, etc).

  At the very least, this plugin is a much more comprehensive example
  for how to manipulate kernel code using the gcc plugin internals"

* tag 'gcc-plugins-v4.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  latent_entropy: Mark functions with __latent_entropy
  gcc-plugins: Add latent_entropy plugin
-rw-r--r--  arch/Kconfig                                 |  18
-rw-r--r--  arch/powerpc/kernel/Makefile                 |   5
-rw-r--r--  block/blk-softirq.c                          |   2
-rw-r--r--  drivers/char/random.c                        |   4
-rw-r--r--  fs/namespace.c                               |   1
-rw-r--r--  include/linux/compiler-gcc.h                 |   7
-rw-r--r--  include/linux/compiler.h                     |   4
-rw-r--r--  include/linux/fdtable.h                      |   2
-rw-r--r--  include/linux/genhd.h                        |   2
-rw-r--r--  include/linux/init.h                         |   5
-rw-r--r--  include/linux/random.h                       |  15
-rw-r--r--  init/main.c                                  |   1
-rw-r--r--  kernel/fork.c                                |   7
-rw-r--r--  kernel/rcu/tiny.c                            |   2
-rw-r--r--  kernel/rcu/tree.c                            |   2
-rw-r--r--  kernel/sched/fair.c                          |   2
-rw-r--r--  kernel/softirq.c                             |   4
-rw-r--r--  kernel/time/timer.c                          |   2
-rw-r--r--  lib/irq_poll.c                               |   2
-rw-r--r--  lib/random32.c                               |   2
-rw-r--r--  mm/page_alloc.c                              |   5
-rw-r--r--  net/core/dev.c                               |   4
-rw-r--r--  scripts/Makefile.gcc-plugins                 |   9
-rw-r--r--  scripts/gcc-plugins/latent_entropy_plugin.c  | 640
24 files changed, 725 insertions, 22 deletions
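As a quick orientation before the diff itself, here is a minimal sketch (not part of this commit; example_pool and example_handler are invented names) of the two ways the new __latent_entropy marker is used throughout the changes below: on static variables, where the plugin replaces the implicit zero-initializer with random per-build constants, and on functions, where the plugin perturbs a local entropy value in every basic block and folds it into the global latent_entropy on return. Without the plugin, the attribute expands to nothing.

	#include <linux/types.h>
	#include <linux/interrupt.h>	/* struct softirq_action */
	#include <linux/compiler.h>	/* __latent_entropy (empty unless the plugin is enabled) */

	/*
	 * Variable use: with CONFIG_GCC_PLUGIN_LATENT_ENTROPY the array starts
	 * out filled with random constants chosen at compile time, not zeroes.
	 */
	static u32 example_pool[32] __latent_entropy;

	/*
	 * Function use: the plugin inserts a local entropy variable, mixes a
	 * random constant into it in each basic block, and updates the global
	 * latent_entropy variable when the function returns.
	 */
	static __latent_entropy void example_handler(struct softirq_action *h)
	{
		/* ordinary handler body */
	}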
diff --git a/arch/Kconfig b/arch/Kconfig
index 11d349561ece..659bdd079277 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -383,6 +383,24 @@ config GCC_PLUGIN_SANCOV
gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
by Dmitry Vyukov <dvyukov@google.com>.
+config GCC_PLUGIN_LATENT_ENTROPY
+ bool "Generate some entropy during boot and runtime"
+ depends on GCC_PLUGINS
+ help
+ By saying Y here the kernel will instrument some kernel code to
+ extract some entropy from both original and artificially created
+ program state. This will help especially embedded systems where
+ there is little 'natural' source of entropy normally. The cost
+ is some slowdown of the boot process (about 0.5%) and fork and
+ irq processing.
+
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
config HAVE_CC_STACKPROTECTOR
bool
help
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 6913f6725ce1..1925341dbb9c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
+CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 96631e6a22b9..06cf9807f49a 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
* Softirq action handler - move entries to local list and loop over them
* while passing them to the queue registered handler.
*/
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
struct list_head *cpu_list, local_list;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d131e152c8ce..d6876d506220 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
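With the attribute on these pool arrays, the plugin's ARRAY_TYPE handler (see handle_latent_entropy_attribute() in the new plugin file below) builds a constructor of random constants for DECL_INITIAL, so the pools are no longer all-zero before the first writes. A hand-written sketch of what that amounts to, with invented values:

	/*
	 * Sketch only: roughly what the plugin-generated initializer looks
	 * like. The constants are fixed at build time; the values here are
	 * invented.
	 */
	static __u32 input_pool_data[INPUT_POOL_WORDS] = {
		0x8a3cf0d2, 0x17b64e09, 0xd4c1a77e, 0x5e92bb31,
		/* ... one random constant per remaining element ... */
	};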
diff --git a/fs/namespace.c b/fs/namespace.c
index 58aca9c931ac..e6c234b1a645 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2824,6 +2824,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
+__latent_entropy
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 573c5a18908f..432f5c97e18f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
#endif /* GCC_VERSION >= 40300 */
#if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f1bfa15b6f9b..cf0fa5d86059 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -429,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __attribute_const__ /* unimplemented */
#endif
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index aca2a6a1d035..6e84b2cae6ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 1dbf52f9c24b..e0341af6950e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
diff --git a/include/linux/init.h b/include/linux/init.h
index 024a0b5b3ed0..e30104ceb86d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace
+#define __init __section(.init.text) __cold notrace __latent_entropy
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -75,7 +75,8 @@
#define __exit __section(.exit.text) __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace
+#define __meminit __section(.meminit.text) __cold notrace \
+ __latent_entropy
#define __meminitdata __section(.meminit.data)
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
diff --git a/include/linux/random.h b/include/linux/random.h
index f7bb7a355cf7..7bd2403e4fef 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -18,9 +18,20 @@ struct random_ready_callback {
};
extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+ add_device_randomness((const void *)&latent_entropy,
+ sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+ unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
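The callers added later in this series (do_one_initcall() and _do_fork()) invoke the new add_latent_entropy() helper at points where a lot of instrumented code has just run. A hedged, hypothetical example of calling it from other init code (example_init() is an invented name):

	#include <linux/init.h>
	#include <linux/random.h>

	static int __init example_init(void)
	{
		/* ... data- and timing-dependent setup work runs here ... */

		/*
		 * Fold whatever the instrumented code has accumulated in
		 * latent_entropy into the input pool; this is an empty inline
		 * when CONFIG_GCC_PLUGIN_LATENT_ENTROPY is disabled.
		 */
		add_latent_entropy();
		return 0;
	}
	device_initcall(example_init);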
diff --git a/init/main.c b/init/main.c
index a8a58e2794a5..2858be732f6d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -789,6 +789,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
}
WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
+ add_latent_entropy();
return ret;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d42242485cb..623259fc794d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -547,7 +547,8 @@ free_tsk:
}
#ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
@@ -1441,7 +1442,8 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(
+ unsigned long clone_flags,
unsigned long stack_start,
unsigned long stack_size,
int __user *child_tidptr,
@@ -1926,6 +1928,7 @@ long _do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
+ add_latent_entropy();
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 944b1b491ed8..1898559e6b60 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
false));
}
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7e2e03879c2e..69a5611a7e7c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_state *rsp;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 502e95a6e927..2d4ad72f8f3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8522,7 +8522,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 66762645f9e8..1bf81ef91375 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -496,7 +496,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
@@ -532,7 +532,7 @@ static void tasklet_action(struct softirq_action *a)
}
}
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 32bf6f75a8fe..2d47980a1bc4 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2be55692aa43..1d6565e81030 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
}
EXPORT_SYMBOL(irq_poll_complete);
-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
diff --git a/lib/random32.c b/lib/random32.c
index 915982b304bb..fa594b1140e6 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
}
#endif
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
/**
* prandom_u32_state - seeded pseudo-random number generator.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ca423cc20b59..2b3bf6767d54 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -91,6 +91,11 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif
+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
/*
* Array of node states.
*/
diff --git a/net/core/dev.c b/net/core/dev.c
index f1fe26f66458..4bc19a164ba5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3845,7 +3845,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -5198,7 +5198,7 @@ out_unlock:
return work;
}
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 61f0e6db909b..060d2cb373db 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -6,6 +6,12 @@ ifdef CONFIG_GCC_PLUGINS
gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) += cyc_complexity_plugin.so
+ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
+ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += -DLATENT_ENTROPY_PLUGIN
+ ifdef CONFIG_PAX_LATENT_ENTROPY
+ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+ endif
+
ifdef CONFIG_GCC_PLUGIN_SANCOV
ifeq ($(CFLAGS_KCOV),)
# It is needed because of the gcc-plugin.sh and gcc version checks.
@@ -21,7 +27,8 @@ ifdef CONFIG_GCC_PLUGINS
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
- export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
+ export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
+ export SANCOV_PLUGIN DISABLE_LATENT_ENTROPY_PLUGIN
ifneq ($(PLUGINCC),)
# SANCOV_PLUGIN can be only in CFLAGS_KCOV because avoid duplication.
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
new file mode 100644
index 000000000000..ff1939b804ae
--- /dev/null
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2012-2016 by the PaX Team <pageexec@freemail.hu>
+ * Copyright 2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ * but for the kernel it doesn't matter since it doesn't link against
+ * any of the gcc libraries
+ *
+ * This gcc plugin helps generate a little bit of entropy from program state,
+ * used throughout the uptime of the kernel. Here is an instrumentation example:
+ *
+ * before:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ * if (argc <= 1)
+ * printf("%s: no command arguments :(\n", *argv);
+ * else
+ * printf("%s: %d command arguments!\n", *argv, args - 1);
+ * }
+ *
+ * after:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ * // latent_entropy_execute() 1.
+ * unsigned long local_entropy;
+ * // init_local_entropy() 1.
+ * void *local_entropy_frameaddr;
+ * // init_local_entropy() 3.
+ * unsigned long tmp_latent_entropy;
+ *
+ * // init_local_entropy() 2.
+ * local_entropy_frameaddr = __builtin_frame_address(0);
+ * local_entropy = (unsigned long) local_entropy_frameaddr;
+ *
+ * // init_local_entropy() 4.
+ * tmp_latent_entropy = latent_entropy;
+ * // init_local_entropy() 5.
+ * local_entropy ^= tmp_latent_entropy;
+ *
+ * // latent_entropy_execute() 3.
+ * if (argc <= 1) {
+ * // perturb_local_entropy()
+ * local_entropy += 4623067384293424948;
+ * printf("%s: no command arguments :(\n", *argv);
+ * // perturb_local_entropy()
+ * } else {
+ * local_entropy ^= 3896280633962944730;
+ * printf("%s: %d command arguments!\n", *argv, args - 1);
+ * }
+ *
+ * // latent_entropy_execute() 4.
+ * tmp_latent_entropy = rol(tmp_latent_entropy, local_entropy);
+ * latent_entropy = tmp_latent_entropy;
+ * }
+ *
+ * TODO:
+ * - add ipa pass to identify not explicitly marked candidate functions
+ * - mix in more program state (function arguments/return values,
+ * loop variables, etc)
+ * - more instrumentation control via attribute parameters
+ *
+ * BUGS:
+ * - none known
+ *
+ * Options:
+ * -fplugin-arg-latent_entropy_plugin-disable
+ *
+ * Attribute: __attribute__((latent_entropy))
+ * The latent_entropy gcc attribute can be only on functions and variables.
+ * If it is on a function then the plugin will instrument it. If the attribute
+ * is on a variable then the plugin will initialize it with a random value.
+ * The variable must be an integer, an integer array type or a structure
+ * with integer fields.
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static GTY(()) tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
+ .version = "201606141920vanilla",
+ .help = "disable\tturn off latent entropy instrumentation\n",
+};
+
+static unsigned HOST_WIDE_INT seed;
+/*
+ * get_random_seed() (this is a GCC function) generates the seed.
+ * This is a simple random generator without any cryptographic security because
+ * the entropy doesn't come from here.
+ */
+static unsigned HOST_WIDE_INT get_random_const(void)
+{
+ unsigned int i;
+ unsigned HOST_WIDE_INT ret = 0;
+
+ for (i = 0; i < 8 * sizeof(ret); i++) {
+ ret = (ret << 1) | (seed & 1);
+ seed >>= 1;
+ if (ret & 1)
+ seed ^= 0xD800000000000000ULL;
+ }
+
+ return ret;
+}
+
+static tree tree_get_random_const(tree type)
+{
+ unsigned long long mask;
+
+ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
+ mask = 2 * (mask - 1) + 1;
+
+ if (TYPE_UNSIGNED(type))
+ return build_int_cstu(type, mask & get_random_const());
+ return build_int_cst(type, mask & get_random_const());
+}
+
+static tree handle_latent_entropy_attribute(tree *node, tree name,
+ tree args __unused,
+ int flags __unused,
+ bool *no_add_attrs)
+{
+ tree type;
+#if BUILDING_GCC_VERSION <= 4007
+ VEC(constructor_elt, gc) *vals;
+#else
+ vec<constructor_elt, va_gc> *vals;
+#endif
+
+ switch (TREE_CODE(*node)) {
+ default:
+ *no_add_attrs = true;
+ error("%qE attribute only applies to functions and variables",
+ name);
+ break;
+
+ case VAR_DECL:
+ if (DECL_INITIAL(*node)) {
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must not be initialized",
+ *node, name);
+ break;
+ }
+
+ if (!TREE_STATIC(*node)) {
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must not be local",
+ *node, name);
+ break;
+ }
+
+ type = TREE_TYPE(*node);
+ switch (TREE_CODE(type)) {
+ default:
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields",
+ *node, name);
+ break;
+
+ case RECORD_TYPE: {
+ tree fld, lst = TYPE_FIELDS(type);
+ unsigned int nelt = 0;
+
+ for (fld = lst; fld; nelt++, fld = TREE_CHAIN(fld)) {
+ tree fieldtype;
+
+ fieldtype = TREE_TYPE(fld);
+ if (TREE_CODE(fieldtype) == INTEGER_TYPE)
+ continue;
+
+ *no_add_attrs = true;
+ error("structure variable %qD with %qE attribute has a non-integer field %qE",
+ *node, name, fld);
+ break;
+ }
+
+ if (fld)
+ break;
+
+#if BUILDING_GCC_VERSION <= 4007
+ vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+ vec_alloc(vals, nelt);
+#endif
+
+ for (fld = lst; fld; fld = TREE_CHAIN(fld)) {
+ tree random_const, fld_t = TREE_TYPE(fld);
+
+ random_const = tree_get_random_const(fld_t);
+ CONSTRUCTOR_APPEND_ELT(vals, fld, random_const);
+ }
+
+ /* Initialize the fields with random constants */
+ DECL_INITIAL(*node) = build_constructor(type, vals);
+ break;
+ }
+
+ /* Initialize the variable with a random constant */
+ case INTEGER_TYPE:
+ DECL_INITIAL(*node) = tree_get_random_const(type);
+ break;
+
+ case ARRAY_TYPE: {
+ tree elt_type, array_size, elt_size;
+ unsigned int i, nelt;
+ HOST_WIDE_INT array_size_int, elt_size_int;
+
+ elt_type = TREE_TYPE(type);
+ elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
+ array_size = TYPE_SIZE_UNIT(type);
+
+ if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size
+ || TREE_CODE(array_size) != INTEGER_CST) {
+ *no_add_attrs = true;
+ error("array variable %qD with %qE attribute must be a fixed length integer array type",
+ *node, name);
+ break;
+ }
+
+ array_size_int = TREE_INT_CST_LOW(array_size);
+ elt_size_int = TREE_INT_CST_LOW(elt_size);
+ nelt = array_size_int / elt_size_int;
+
+#if BUILDING_GCC_VERSION <= 4007
+ vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+ vec_alloc(vals, nelt);
+#endif
+
+ for (i = 0; i < nelt; i++) {
+ tree cst = size_int(i);
+ tree rand_cst = tree_get_random_const(elt_type);
+
+ CONSTRUCTOR_APPEND_ELT(vals, cst, rand_cst);
+ }
+
+ /*
+ * Initialize the elements of the array with random
+ * constants
+ */
+ DECL_INITIAL(*node) = build_constructor(type, vals);
+ break;
+ }
+ }
+ break;
+
+ case FUNCTION_DECL:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+static struct attribute_spec latent_entropy_attr = {
+ .name = "latent_entropy",
+ .min_length = 0,
+ .max_length = 0,
+ .decl_required = true,
+ .type_required = false,
+ .function_type_required = false,
+ .handler = handle_latent_entropy_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = false
+#endif
+};
+
+static void register_attributes(void *event_data __unused, void *data __unused)
+{
+ register_attribute(&latent_entropy_attr);
+}
+
+static bool latent_entropy_gate(void)
+{
+ tree list;
+
+ /* don't bother with noreturn functions for now */
+ if (TREE_THIS_VOLATILE(current_function_decl))
+ return false;
+
+ /* gcc-4.5 doesn't discover some trivial noreturn functions */
+ if (EDGE_COUNT(EXIT_BLOCK_PTR_FOR_FN(cfun)->preds) == 0)
+ return false;
+
+ list = DECL_ATTRIBUTES(current_function_decl);
+ return lookup_attribute("latent_entropy", list) != NULL_TREE;
+}
+
+static tree create_var(tree type, const char *name)
+{
+ tree var;
+
+ var = create_tmp_var(type, name);
+ add_referenced_var(var);
+ mark_sym_for_renaming(var);
+ return var;
+}
+
+/*
+ * Set up the next operation and its constant operand to use in the latent
+ * entropy PRNG. When RHS is specified, the request is for perturbing the
+ * local latent entropy variable, otherwise it is for perturbing the global
+ * latent entropy variable where the two operands are already given by the
+ * local and global latent entropy variables themselves.
+ *
+ * The operation is one of add/xor/rol when instrumenting the local entropy
+ * variable and one of add/xor when perturbing the global entropy variable.
+ * Rotation is not used for the latter case because it would transmit less
+ * entropy to the global variable than the other two operations.
+ */
+static enum tree_code get_op(tree *rhs)
+{
+ static enum tree_code op;
+ unsigned HOST_WIDE_INT random_const;
+
+ random_const = get_random_const();
+
+ switch (op) {
+ case BIT_XOR_EXPR:
+ op = PLUS_EXPR;
+ break;
+
+ case PLUS_EXPR:
+ if (rhs) {
+ op = LROTATE_EXPR;
+ /*
+ * This code limits the value of random_const to
+ * the size of a wide int for the rotation
+ */
+ random_const &= HOST_BITS_PER_WIDE_INT - 1;
+ break;
+ }
+
+ case LROTATE_EXPR:
+ default:
+ op = BIT_XOR_EXPR;
+ break;
+ }
+ if (rhs)
+ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+ return op;
+}
+
+static gimple create_assign(enum tree_code code, tree lhs, tree op1,
+ tree op2)
+{
+ return gimple_build_assign_with_ops(code, lhs, op1, op2);
+}
+
+static void perturb_local_entropy(basic_block bb, tree local_entropy)
+{
+ gimple_stmt_iterator gsi;
+ gimple assign;
+ tree rhs;
+ enum tree_code op;
+
+ op = get_op(&rhs);
+ assign = create_assign(op, local_entropy, local_entropy, rhs);
+ gsi = gsi_after_labels(bb);
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
+ tree local_entropy)
+{
+ gimple assign;
+ tree temp;
+ enum tree_code op;
+
+ /* 1. create temporary copy of latent_entropy */
+ temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+ /* 2. read... */
+ add_referenced_var(latent_entropy_decl);
+ mark_sym_for_renaming(latent_entropy_decl);
+ assign = gimple_build_assign(temp, latent_entropy_decl);
+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 3. ...modify... */
+ op = get_op(NULL);
+ assign = create_assign(op, temp, temp, local_entropy);
+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 4. ...write latent_entropy */
+ assign = gimple_build_assign(latent_entropy_decl, temp);
+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static bool handle_tail_calls(basic_block bb, tree local_entropy)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gcall *call;
+ gimple stmt = gsi_stmt(gsi);
+
+ if (!is_gimple_call(stmt))
+ continue;
+
+ call = as_a_gcall(stmt);
+ if (!gimple_call_tail_p(call))
+ continue;
+
+ __perturb_latent_entropy(&gsi, local_entropy);
+ return true;
+ }
+
+ return false;
+}
+
+static void perturb_latent_entropy(tree local_entropy)
+{
+ edge_iterator ei;
+ edge e, last_bb_e;
+ basic_block last_bb;
+
+ gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
+ last_bb_e = single_pred_edge(EXIT_BLOCK_PTR_FOR_FN(cfun));
+
+ FOR_EACH_EDGE(e, ei, last_bb_e->src->preds) {
+ if (ENTRY_BLOCK_PTR_FOR_FN(cfun) == e->src)
+ continue;
+ if (EXIT_BLOCK_PTR_FOR_FN(cfun) == e->src)
+ continue;
+
+ handle_tail_calls(e->src, local_entropy);
+ }
+
+ last_bb = single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun));
+ if (!handle_tail_calls(last_bb, local_entropy)) {
+ gimple_stmt_iterator gsi = gsi_last_bb(last_bb);
+
+ __perturb_latent_entropy(&gsi, local_entropy);
+ }
+}
+
+static void init_local_entropy(basic_block bb, tree local_entropy)
+{
+ gimple assign, call;
+ tree frame_addr, rand_const, tmp, fndecl, udi_frame_addr;
+ enum tree_code op;
+ unsigned HOST_WIDE_INT rand_cst;
+ gimple_stmt_iterator gsi = gsi_after_labels(bb);
+
+ /* 1. create local_entropy_frameaddr */
+ frame_addr = create_var(ptr_type_node, "local_entropy_frameaddr");
+
+ /* 2. local_entropy_frameaddr = __builtin_frame_address() */
+ fndecl = builtin_decl_implicit(BUILT_IN_FRAME_ADDRESS);
+ call = gimple_build_call(fndecl, 1, integer_zero_node);
+ gimple_call_set_lhs(call, frame_addr);
+ gsi_insert_before(&gsi, call, GSI_NEW_STMT);
+ update_stmt(call);
+
+ udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
+ assign = gimple_build_assign(local_entropy, udi_frame_addr);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 3. create temporary copy of latent_entropy */
+ tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+ /* 4. read the global entropy variable into local entropy */
+ add_referenced_var(latent_entropy_decl);
+ mark_sym_for_renaming(latent_entropy_decl);
+ assign = gimple_build_assign(tmp, latent_entropy_decl);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 5. mix local_entropy_frameaddr into local entropy */
+ assign = create_assign(BIT_XOR_EXPR, local_entropy, local_entropy, tmp);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ rand_cst = get_random_const();
+ rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst);
+ op = get_op(NULL);
+ assign = create_assign(op, local_entropy, local_entropy, rand_const);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static bool create_latent_entropy_decl(void)
+{
+ varpool_node_ptr node;
+
+ if (latent_entropy_decl != NULL_TREE)
+ return true;
+
+ FOR_EACH_VARIABLE(node) {
+ tree name, var = NODE_DECL(node);
+
+ if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
+ continue;
+
+ name = DECL_NAME(var);
+ if (strcmp(IDENTIFIER_POINTER(name), "latent_entropy"))
+ continue;
+
+ latent_entropy_decl = var;
+ break;
+ }
+
+ return latent_entropy_decl != NULL_TREE;
+}
+
+static unsigned int latent_entropy_execute(void)
+{
+ basic_block bb;
+ tree local_entropy;
+
+ if (!create_latent_entropy_decl())
+ return 0;
+
+ /* prepare for step 2 below */
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ if (!single_pred_p(bb)) {
+ split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ }
+
+ /* 1. create the local entropy variable */
+ local_entropy = create_var(unsigned_intDI_type_node, "local_entropy");
+
+ /* 2. initialize the local entropy variable */
+ init_local_entropy(bb, local_entropy);
+
+ bb = bb->next_bb;
+
+ /*
+ * 3. instrument each BB with an operation on the
+ * local entropy variable
+ */
+ while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
+ perturb_local_entropy(bb, local_entropy);
+ bb = bb->next_bb;
+ };
+
+ /* 4. mix local entropy into the global entropy variable */
+ perturb_latent_entropy(local_entropy);
+ return 0;
+}
+
+static void latent_entropy_start_unit(void *gcc_data __unused,
+ void *user_data __unused)
+{
+ tree type, id;
+ int quals;
+
+ seed = get_random_seed(false);
+
+ if (in_lto_p)
+ return;
+
+ /* extern volatile u64 latent_entropy */
+ gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
+ quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
+ type = build_qualified_type(long_long_unsigned_type_node, quals);
+ id = get_identifier("latent_entropy");
+ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
+
+ TREE_STATIC(latent_entropy_decl) = 1;
+ TREE_PUBLIC(latent_entropy_decl) = 1;
+ TREE_USED(latent_entropy_decl) = 1;
+ DECL_PRESERVE_P(latent_entropy_decl) = 1;
+ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
+ DECL_EXTERNAL(latent_entropy_decl) = 1;
+ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
+ lang_hooks.decls.pushdecl(latent_entropy_decl);
+}
+
+#define PASS_NAME latent_entropy
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+ | TODO_update_ssa
+#include "gcc-generate-gimple-pass.h"
+
+int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
+{
+ bool enabled = true;
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument * const argv = plugin_info->argv;
+ int i;
+
+ struct register_pass_info latent_entropy_pass_info;
+
+ latent_entropy_pass_info.pass = make_latent_entropy_pass();
+ latent_entropy_pass_info.reference_pass_name = "optimized";
+ latent_entropy_pass_info.ref_pass_instance_number = 1;
+ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
+ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
+ {
+ .base = &latent_entropy_decl,
+ .nelt = 1,
+ .stride = sizeof(latent_entropy_decl),
+ .cb = &gt_ggc_mx_tree_node,
+ .pchw = &gt_pch_nx_tree_node
+ },
+ LAST_GGC_ROOT_TAB
+ };
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ for (i = 0; i < argc; ++i) {
+ if (!(strcmp(argv[i].key, "disable"))) {
+ enabled = false;
+ continue;
+ }
+ error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL,
+ &latent_entropy_plugin_info);
+ if (enabled) {
+ register_callback(plugin_name, PLUGIN_START_UNIT,
+ &latent_entropy_start_unit, NULL);
+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS,
+ NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &latent_entropy_pass_info);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes,
+ NULL);
+
+ return 0;
+}
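A closing note on the constants used by the instrumentation: get_random_const() above is a 64-bit Galois LFSR seeded from GCC's random seed, so the per-build constants are reproducible for a given seed. The stand-alone user-space sketch below (not part of the plugin; the seed value is arbitrary) reproduces that generator for illustration:

	#include <stdio.h>
	#include <inttypes.h>

	/*
	 * In the plugin this comes from get_random_seed(false), i.e. GCC's
	 * -frandom-seed value (or a per-invocation default); arbitrary here.
	 */
	static uint64_t seed = 0x123456789abcdef0ULL;

	/*
	 * Same logic as the plugin's get_random_const(): a Galois LFSR clocked
	 * 64 times to produce one constant per call.
	 */
	static uint64_t get_random_const(void)
	{
		uint64_t ret = 0;
		unsigned int i;

		for (i = 0; i < 64; i++) {
			ret = (ret << 1) | (seed & 1);	/* shift the low seed bit into ret */
			seed >>= 1;
			if (ret & 1)			/* feedback when a 1 bit came out */
				seed ^= 0xD800000000000000ULL;
		}
		return ret;
	}

	int main(void)
	{
		int i;

		/*
		 * The same seed always yields the same sequence, so a rebuild
		 * with an identical seed gets identical instrumentation
		 * constants.
		 */
		for (i = 0; i < 4; i++)
			printf("constant %d: 0x%016" PRIx64 "\n", i, get_random_const());
		return 0;
	}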